diff --git a/.ci-operator.yaml b/.ci-operator.yaml
new file mode 100644
index 0000000000000..7c15f83e3e6b4
--- /dev/null
+++ b/.ci-operator.yaml
@@ -0,0 +1,4 @@
+build_root_image:
+  name: release
+  namespace: openshift
+  tag: rhel-9-release-golang-1.23-openshift-4.19
diff --git a/.gitignore b/.gitignore
index c1915d7a1544f..fc737de69ed13 100644
--- a/.gitignore
+++ b/.gitignore
@@ -124,3 +124,10 @@ zz_generated_*_test.go
 
 # generated by verify-vendor.sh
 vendordiff.patch
+
+# Ignore openshift source archives produced as part of rpm build
+openshift*.tar.gz
+
+# Ensure that openapi definitions are not ignored to ensure that
+# openshift/origin can vendor them.
+!pkg/generated/openapi/zz_generated.openapi.go
diff --git a/.openshift-tests-extension/openshift_payload_hyperkube.json b/.openshift-tests-extension/openshift_payload_hyperkube.json
new file mode 100644
index 0000000000000..23e86487fa5f1
--- /dev/null
+++ b/.openshift-tests-extension/openshift_payload_hyperkube.json
@@ -0,0 +1,80079 @@
+[
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should sign the new added bootstrap tokens [Disabled:Unimplemented] [Suite:k8s]",
+    "labels": {
+      "Feature:BootstrapTokens": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial] [Disruptive] [Disabled:Unimplemented] [Suite:k8s]",
+    "labels": {
+      "Disruptive": {},
+      "Feature:BootstrapTokens": {},
+      "Serial": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should delete the signed bootstrap tokens from clusterInfo ConfigMap when bootstrap token is deleted [Disabled:Unimplemented] [Suite:k8s]",
+    "labels": {
+      "Feature:BootstrapTokens": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should delete the token secret when the secret expired [Disabled:Unimplemented] [Suite:k8s]",
+    "labels": {
+      "Feature:BootstrapTokens": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should not delete the token secret when the secret is not expired [Disabled:Unimplemented] [Suite:k8s]",
+    "labels": {
+      "Feature:BootstrapTokens": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-architecture] Conformance Tests should have at least two untainted nodes [Conformance] [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-architecture": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support building a client with a CSR [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support CSR API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthenticator] The kubelet's main port 10250 should reject requests with no credentials [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthenticator": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthenticator] The kubelet can delegate ServiceAccount tokens to the API server [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthenticator": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent secret should exit with the Forbidden error, not a NotFound error [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting an existing secret should exit with the Forbidden error [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting an existing configmap should exit with the Forbidden error [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting a secret for a workload the node has access to should succeed [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to create another node [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to delete another node [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] should be able to mount a single ClusterTrustBundle by name [Disabled:Alpha] [Suite:k8s]",
+    "labels": {
+      "Feature:ClusterTrustBundle": {},
+      "Feature:ClusterTrustBundleProjection": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] SelfSubjectReview testing SSR in different API groups authentication/v1beta1 [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] SelfSubjectReview testing SSR in different API groups authentication/v1 [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] SelfSubjectReview should support SelfSubjectReview API operations [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts no secret-based service account token should be auto-generated [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should mount an API token into pods [Conformance] [Disabled:Broken] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should allow opting out of API token automount [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should mount projected service account token [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should run through the lifecycle of a ServiceAccount [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should guarantee kube-root-ca.crt exist in any namespace [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should update a ServiceAccount [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] SubjectReview should support SubjectReview API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: udp [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: sctp [LinuxOnly] [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:SCTPConnectivity": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: sctp [LinuxOnly] [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:SCTPConnectivity": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API [Serial] [Disruptive] [Feature:EphemeralStorage] Downward API tests for local ephemeral storage should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars [Suite:k8s]",
+    "labels": {
+      "Disruptive": {},
+      "Feature:EphemeralStorage": {},
+      "Serial": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API [Serial] [Disruptive] [Feature:EphemeralStorage] Downward API tests for local ephemeral storage should provide default limits.ephemeral-storage from node allocatable [Suite:k8s]",
+    "labels": {
+      "Disruptive": {},
+      "Feature:EphemeralStorage": {},
+      "Serial": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is root [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is non-root [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] nonexistent volume subPath should have the correct mode and owner using FSGroup [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] files with FSGroup ownership should support (root,0644,tmpfs) [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] volume on default medium should have the correct mode using FSGroup [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] volume on tmpfs should have the correct mode using FSGroup [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes pod should support shared volumes between containers [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes pod should support memory backed volumes of specified size [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] HostPath should give a volume the correct mode [LinuxOnly] [NodeConformance] [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] HostPath should support r/w [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] HostPath should support subPath [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected combined should project all components that make up the projection API [Projection] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap binary data should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be immutable if `immutable` field is set [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap Should fail non-optional pod creation due to configMap object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide podname only [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should update labels on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should update annotations on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide container's cpu limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide container's memory limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide container's cpu request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide container's memory request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap Should fail non-optional pod creation due to configMap object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide podname only [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should update labels on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should update annotations on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide container's cpu limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide container's memory limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide container's cpu request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide container's memory request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret Should fail non-optional pod creation due to secret object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret Should fail non-optional pod creation due to the key in the secret object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be immutable if `immutable` field is set [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets Should fail non-optional pod creation due to secret object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets Should fail non-optional pod creation due to the key in the secret object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Volumes NFSv4 should be mountable for NFSv4 [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Volumes NFSv3 should be mountable for NFSv3 [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] Logging soak [Performance] [Slow] [Disruptive] should survive logging 1KB every 1s seconds, for a duration of 2m0s [Serial] [Suite:k8s]",
+    "labels": {
+      "Disruptive": {},
+      "Slow": {},
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] MetricsGrabber should grab all metrics from API server. [Disabled:Broken] [Suite:k8s]",
+    "labels": {
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] MetricsGrabber should grab all metrics from a Kubelet. [Disabled:Broken] [Suite:k8s]",
+    "labels": {
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] MetricsGrabber should grab all metrics from a Scheduler. [Disabled:Broken] [Suite:k8s]",
+    "labels": {
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] MetricsGrabber should grab all metrics from a ControllerManager. [Disabled:Broken] [Suite:k8s]",
+    "labels": {
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] MetricsGrabber should grab all metrics slis from API server. [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] Events should manage the lifecycle of an event [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] Events should delete a collection of events [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] Events API should ensure that an event can be fetched, patched, deleted, and listed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] Events API should delete a collection of events [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] Metrics should grab all metrics from kubelet /metrics/resource endpoint [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic for a target [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic from pods in all namespaces [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]
[Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on any PodSelectors [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Ports [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should support allow-all policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow egress access on one named port 
[Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce updated policy [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated namespace [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated pod [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should deny ingress from pods on other namespaces [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should deny ingress access to updated pod [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should deny egress from pods based on PodSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should deny egress from all pods in a namespace [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should work with Ingress, Egress specified together [Feature:NetworkPolicy] 
[Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should stop enforcing policies after they are deleted [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic based on NamespaceSelector with MatchLabels using default ns label [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions using default ns label [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy based on Ports [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy based on Ports [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol API should support creating NetworkPolicy API 
operations [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol API should support creating NetworkPolicy API with endport field [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet registers plugin [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must retry NodePrepareResources [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must not run a pod if a claim is not ready [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must unprepare resources for force-deleted pod [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must call NodePrepareResources even if not used by any container [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must map configs and devices to the right containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports claim and class parameters [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports reusing resources [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] 
on single node supports sharing a claim concurrently [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports sharing a claim sequentially [Slow] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node retries pod scheduling after creating device class [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node retries pod scheduling after updating device class [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node runs a pod without a generated resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports simple pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports inline claim referenced by multiple containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports simple pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA 
[Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports external claim referenced by multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports external claim referenced by multiple containers of multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports init containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node removes reservation from claim when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node deletes generated claims when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node does not delete generated claims when pod is restarting [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node must deallocate after use [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on multiple nodes with network-attached resources schedules onto different nodes [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on multiple nodes with network-attached resources [Serial] [Disruptive] [Slow] must deallocate on non graceful node shutdown [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "Serial": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on multiple nodes reallocation works [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on multiple nodes with node-local resources uses all resources [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports claim and class parameters [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports reusing resources [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports sharing a claim concurrently [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports sharing a claim sequentially [Slow] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node retries pod scheduling after creating device class [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node retries pod scheduling after updating device class [Disabled:Alpha] [Suite:k8s]", + "labels": { + 
"Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node runs a pod without a generated resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports simple pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports inline claim referenced by multiple containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports simple pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports external claim referenced by multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports external claim referenced by multiple containers of multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports init containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node removes reservation from claim when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node deletes generated claims when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node does not delete generated claims when pod is restarting [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node must deallocate after use [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on multiple nodes with different ResourceSlices keeps pod pending because of CEL runtime errors [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on multiple nodes with node-local resources uses all resources [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster support validating admission policy for admin access [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster truncates the name of a generated resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster supports count/resourceclaims.resource.k8s.io ResourceQuota [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports scheduled pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports scheduled pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports simple pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports inline claim referenced by multiple containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports simple pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports external claim referenced by multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports external claim referenced by multiple containers of multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports init containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor removes reservation from claim when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor deletes generated claims when 
pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor does not delete generated claims when pod is restarting [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor must deallocate after use [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports scheduled pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports scheduled pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports simple pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports inline claim referenced by multiple containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports simple pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports external claim referenced by multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports external claim referenced by multiple containers of multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports init containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor removes reservation from claim when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor deletes generated claims when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor does not delete generated claims when pod is restarting [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor must deallocate after use [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with structured parameters must apply per-node permission checks [Disabled:Alpha] [Suite:k8s]", + "labels": { + 
"Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with structured parameters must manage ResourceSlices [Slow] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] multiple drivers using only drapbv1alpha3 work [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should be consumable via environment variable [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should be consumable via the environment [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should fail to create ConfigMap with empty key [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should update ConfigMap successfully [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should run through a ConfigMap lifecycle [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should be consumable as environment variable names when configmap keys start with a digit [Feature:RelaxedEnvironmentVariableValidation] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:RelaxedEnvironmentVariableValidation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a failing exec liveness probe that took longer than the timeout [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a non-local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted startup probe fails [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted by liveness probe because startup probe delays it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted by liveness probe after startup probe enables it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be ready immediately after startupProbe succeeds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should override timeoutGracePeriodSeconds when LivenessProbe field is set [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should override timeoutGracePeriodSeconds when StartupProbe field is set [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a GRPC liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a GRPC liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing 
container should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container with readiness probe should not be ready before initial delay and never restart [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container with readiness probe that fails should never be ready and never restart [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a exec \"cat /tmp/health\" liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a /healthz http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a tcp:8080 liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] 
[Feature:SidecarContainers] Probing restartable init container should have monotonically increasing restart count [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a /healthz http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a failing exec liveness probe that took longer than the timeout [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a non-local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted startup probe fails [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + 
"sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted by liveness probe because startup probe delays it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted by liveness probe after startup probe enables it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be ready immediately after startupProbe succeeds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should override timeoutGracePeriodSeconds when LivenessProbe field is set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should override timeoutGracePeriodSeconds when StartupProbe field is set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a GRPC liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a GRPC liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should mark readiness on pods to false while pod is in progress 
of terminating when a pod has a readiness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Containers should use the image defaults if command and args are blank [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Containers should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Containers should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Containers should be able to override the image's default command and arguments [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide host IP as an env var [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide 
container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide pod UID as env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPIHugePages] Downward API tests for hugepages should provide container's limits.hugepages-\u003cpagesize\u003e and requests.hugepages-\u003cpagesize\u003e as env vars [Suite:k8s]", + "labels": { + "Disruptive": {}, + "NodeFeature:DownwardAPIHugePages": {}, + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPIHugePages] Downward API tests for hugepages should provide default limits.hugepages-\u003cpagesize\u003e from node allocatable [Suite:k8s]", + "labels": { + "Disruptive": {}, + "NodeFeature:DownwardAPIHugePages": {}, + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Ephemeral Containers [NodeConformance] will start an ephemeral container in an existing pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Ephemeral Containers [NodeConformance] should update the ephemeral containers in an existing pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should allow composing env vars into new env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should allow substituting values in a container's command [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should allow substituting values in a container's args [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should allow substituting values in a volume subpath [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should fail substituting values in a volume subpath with backticks [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should fail substituting values in a volume subpath with absolute path [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should succeed in writing subpaths in container [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion allow almost all printable ASCII characters as environment variable names [Feature:RelaxedEnvironmentVariableValidation] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:RelaxedEnvironmentVariableValidation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ImageCredentialProvider [Feature:KubeletCredentialProviders] should be able to create pod with image credentials fetched from external credential provider [Disabled:RebaseInProgress] [Suite:k8s]", + "labels": { + "Feature:KubeletCredentialProviders": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartNever pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartAlways pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + 
"Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] InitContainer [NodeConformance] should not start app containers if init containers fail on a RestartAlways pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] InitContainer [NodeConformance] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling a busybox command in a pod should print the output to logs [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling a busybox command that always fails in a pod should have an terminated reason [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling a busybox command that always fails in a pod should be possible to delete [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling an agnhost Pod with hostAliases should write entries to /etc/hosts [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling a read only busybox container should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet with pods in a privileged namespace when scheduling an agnhost Pod with hostAliases and hostNetwork should write entries to /etc/hosts when hostNetwork is enabled [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] KubeletManagedEtcHosts 
should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Lease lease API should be available [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart https hook properly [MinimumKubeletVersion:1.23] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop https hook properly [MinimumKubeletVersion:1.23] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart https hook properly [MinimumKubeletVersion:1.23] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop https hook properly [MinimumKubeletVersion:1.23] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action valid prestop hook using sleep action [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:PodLifecycleSleepAction": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action reduce GracePeriodSeconds during runtime [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodLifecycleSleepAction": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action ignore terminated container [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "Feature:PodLifecycleSleepAction": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NodeLease NodeLease the kubelet should create and update a lease in the kube-node-lease namespace [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NodeLease NodeLease should have OwnerReferences set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NodeLease NodeLease the kubelet should report node status infrequently [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PodOSRejection [NodeConformance] Kubelet should reject pod when the node OS doesn't match pod's OS [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should get a host IP [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should be submitted and removed [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should be updated [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should contain environment variables for services [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should support remote command execution over websockets [NodeConformance] [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should support retrieving logs from the container over websockets [NodeConformance] [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should have their auto-restart back-off timer reset on image update [Slow] [NodeConformance] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should cap back-off at MaxContainerBackOff [Slow] [NodeConformance] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should support pod readiness gates [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should delete a collection of pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should run through the lifecycle of Pods and PodStatus [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should patch a pod status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PodTemplates should run the lifecycle of PodTemplates [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PodTemplates should delete a collection of pod templates [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PodTemplates should replace a pod template [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PrivilegedPod [NodeConformance] should enable privileged commands [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when starting a container that exits should run with the expected status [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message if TerminationMessagePath is set [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when running a container with a new image should not be able to pull image from invalid registry [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when running a container with a new image should be able to pull image [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when running a container with a new image should not be able to pull from private registry without secret [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when running a container with a new image should be able to pull from private registry with secret [NodeConformance] [Disabled:Broken] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should reject a Pod requesting a non-existent RuntimeClass [NodeConformance] [Conformance] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler] [Disabled:Broken] [Suite:k8s]", + "labels": { + "NodeFeature:RuntimeHandler": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler] [Disabled:Broken] [Suite:k8s]", + "labels": { + "NodeFeature:RuntimeHandler": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should support RuntimeClasses API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should be consumable from pods in env vars [NodeConformance] [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should be consumable via the environment [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should fail to create secret due to empty secret key [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should patch a secret [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should be consumable as environment variable names when secret keys start with a digit [Feature:RelaxedEnvironmentVariableValidation] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:RelaxedEnvironmentVariableValidation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with HostUsers must create the user namespace if set to false [LinuxOnly] [Feature:UserNamespacesSupport] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:UserNamespacesSupport": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with HostUsers must not create the user namespace if set to true [LinuxOnly] [Feature:UserNamespacesSupport] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:UserNamespacesSupport": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with HostUsers should mount all volumes with proper permissions with hostUsers=false [LinuxOnly] [Feature:UserNamespacesSupport] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:UserNamespacesSupport": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with HostUsers should set FSGroup to user inside the container with hostUsers=false [LinuxOnly] [Feature:UserNamespacesSupport] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:UserNamespacesSupport": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsUser should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + 
"sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsUser should run the container with uid 0 [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsNonRoot should run with an explicit non-root user ID [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsNonRoot should not run with an explicit root user ID [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsNonRoot should run with an image specified user ID [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsNonRoot should not run without a specified user ID [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with privileged should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with privileged should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeFeature:HostAccess": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when true [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] User Namespaces for Pod Security Standards [LinuxOnly] with UserNamespacesSupport and UserNamespacesPodSecurityStandards enabled should allow pod [Feature:UserNamespacesPodSecurityStandards] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:UserNamespacesPodSecurityStandards": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls [MinimumKubeletVersion:1.21] [Environment:NotInUserNS] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Environment:NotInUserNS": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should not launch unsafe, but not explicitly enabled sysctls on the node [MinimumKubeletVersion:1.21] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls with slashes as separator [MinimumKubeletVersion:1.23] [Environment:NotInUserNS] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Environment:NotInUserNS": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has 
those defaults applied. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] LimitRange should list, patch and delete a LimitRange by collection [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:LocalStorageCapacityIsolation": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates pod overhead is considered along with resource limits of pods that are allowed to run verify pod overhead is accounted for [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates resource limits of pods that are allowed to run [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if not matching [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if matching [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeAffinity is respected if not matching [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that required NodeAffinity setting is respected if matching [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if matching [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if not matching [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] [Slow] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] PodTopologySpread Filtering validates 4 pods with MaxSkew=1 are evenly distributed into 2 nodes [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates Pods with non-empty schedulingGates are blocked on scheduling [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] when PVC has node-affinity to non-existent/illegal nodes, the pod should be scheduled normally if suitable nodes exist [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] validates pod disruption condition is added to the preempted pod [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-scheduling] SchedulerPreemption [Serial] PodTopologySpread Preemption validates proper pods are preempted [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath runs ReplicaSets to verify preemption running path [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPriorities [Serial] Pod should be scheduled to node that don't match the PodAntiAffinity terms [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPriorities [Serial] Pod should be preferably scheduled to nodes pod can tolerate [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPriorities [Serial] PodTopologySpread Scoring validates pod should be preferably scheduled to node which makes the matching pods more evenly distributed [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] Multi-AZ Clusters should spread the pods of a service across zones [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] Multi-AZ Clusters should spread the pods of a replication controller across zones [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should schedule multiple jobs concurrently [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should not schedule jobs when suspended [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-apps] CronJob should not schedule new jobs when ForbidConcurrent [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should replace jobs when ReplaceConcurrent [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should be able to schedule after more than 100 missed schedule [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should not emit unexpected warnings [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should remove from active list jobs that have been deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should delete successful finished jobs with limit of one successful job [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should delete failed finished jobs with limit of one job [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should support timezone [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should support CronJob API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DaemonRestart [Disruptive] Controller Manager should not create/delete replicas across restart [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DaemonRestart [Disruptive] Scheduler should continue assigning pods to nodes across restart [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DaemonRestart [Disruptive] Kubelet should not restart containers across restart [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DaemonRestart [Disruptive] Kube-proxy should recover after being killed accidentally [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should run a job to completion when tasks succeed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should allow to use the pod failure policy on exit code to fail the job early [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should allow to use the pod failure policy to not count the failure towards the backoffLimit [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should allow to use a pod failure policy to ignore failure for an evicted pod; matching on the exit code [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should allow to use a pod failure policy to ignore failure for an evicted pod; matching on the DisruptionTarget condition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should not create pods when created in suspend state [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should delete pods when suspended [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should recreate pods only after they have failed if pod replacement policy is set to Failed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should create pods for an Indexed job with completion indexes and specified hostname [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job with successPolicy should succeeded when all indexes succeeded [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-apps] Job with successPolicy succeededIndexes rule should succeeded even when some indexes remain pending [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job with successPolicy succeededCount rule should succeeded even when some indexes remain pending [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should execute all indexes despite some failing when using backoffLimitPerIndex [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should terminate job execution when the number of failed indexes exceeds maxFailedIndexes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should mark indexes as failed when the FailIndex action is matched in podFailurePolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should remove pods when job is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should run a job to completion when tasks sometimes fail and are not locally restarted [Flaky] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should fail when exceeds active deadline [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should delete a job [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should adopt matching orphans and release non-matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should fail to exceed backoffLimit [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should run a job to completion with CPU requests [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should apply changes to a job status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should manage the lifecycle of a job [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should update the status ready field [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] TTLAfterFinished job should be deleted once it finishes after TTL seconds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ControllerRevision [Serial] should manage the lifecycle of a ControllerRevision [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController Listing PodDisruptionBudgets for all namespaces should list and delete a collection of PodDisruptionBudgets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should create a PodDisruptionBudget [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should observe PodDisruptionBudget status updated [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should update/patch PodDisruptionBudget status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should observe that the PodDisruptionBudget status is not 
updated for unmanaged pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: no PDB =\u003e should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: too few pods, absolute =\u003e should not allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: enough pods, absolute =\u003e should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: enough pods, replicaSet, percentage =\u003e should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: too few pods, replicaSet, percentage =\u003e should not allow an eviction [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: maxUnavailable allow single eviction, percentage =\u003e should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: maxUnavailable deny evictions, integer =\u003e should not allow an eviction [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should block an eviction until the PDB is updated to allow it [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should evict ready pods with Default UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should evict ready pods with IfHealthyBudget UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-apps] DisruptionController should evict ready pods with AlwaysAllow UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should not evict unready pods with Default UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should not evict unready pods with IfHealthyBudget UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should evict unready pods with AlwaysAllow UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should serve a basic image on each replica with a public image [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should serve a basic image on each replica with a private image [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should adopt matching pods on creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should release no longer matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should test the lifecycle of a ReplicationController [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should get and update a ReplicationController scale [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + 
"sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should serve a basic image on each replica with a public image [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should serve a basic image on each replica with a private image [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should surface a failure condition on a common issue like exceeded quota [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should adopt matching pods on creation and release no longer matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet Replicaset should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet Replace and Patch tests [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should list and delete a collection of ReplicaSets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should validate Replicaset Status endpoints [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should provide basic identity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should adopt matching orphans and release non-matching pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should not deadlock when a pod's 
predecessor fails [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications with PVCs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 without failing container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 with failing container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should implement legacy replacement when the update strategy is OnDelete [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Should recreate evicted statefulset [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should list, patch and delete a collection of StatefulSets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should validate Statefulset Status endpoints [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working zookeeper cluster [Suite:k8s]", + "labels": { + "Feature:StatefulSet": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working redis cluster [Suite:k8s]", + "labels": { + "Feature:StatefulSet": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working mysql cluster [Suite:k8s]", + "labels": { + "Feature:StatefulSet": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working CockroachDB cluster [Suite:k8s]", + "labels": { + "Feature:StatefulSet": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet MinReadySeconds should be honored when enabled [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet AvailableReplicas should get updated accordingly when MinReadySeconds is enabled [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs with a WhenDeleted policy [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs with a OnScaledown policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should not delete PVC with OnScaledown policy if another controller owns the PVC [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs after adopting pod (WhenDeleted) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs after adopting pod (WhenScaled) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should not delete PVCs when there is another controller [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Automatically recreate PVC for pending pod when PVC is missing PVC should be recreated when pod is pending due to missing PVC [Disruptive] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Setting .start.ordinal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Increasing .start.ordinal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Decreasing .start.ordinal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Removing .start.ordinal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should run and stop simple 
daemon [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should run and stop complex daemon [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should run and stop complex daemon with node affinity [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should retry creating failed daemon pods [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should not update pod when spec was updated and update strategy is OnDelete [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance] [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should surge pods onto nodes when spec was updated and update strategy is RollingUpdate [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should list and delete a collection of DaemonSets [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should verify changes to a daemon set status [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment deployment reaping should cascade to its replica sets and pods 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment RollingUpdateDeployment should delete old pods and create new ones [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment RecreateDeployment should delete old pods and create new ones [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment deployment should delete old replica sets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment deployment should support rollover [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment iterative rollouts should eventually progress [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment test Deployment ReplicaSet orphaning and adoption regarding controllerRef [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment Deployment should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment deployment should support proportional scaling [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment should not disrupt a cloud load-balancer's connectivity during rollout [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment should run the lifecycle of a Deployment [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment should validate Deployment Status endpoints [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling Autoscaling a service from 1 pod and 3 nodes to 8 pods and \u003e=4 nodes takes less than 15 minutes [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale up at all [Feature:ClusterAutoscalerScalability1] [Suite:k8s]", + "labels": { + "Feature:ClusterAutoscalerScalability1": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale up twice [Feature:ClusterAutoscalerScalability2] [Suite:k8s]", + "labels": { + "Feature:ClusterAutoscalerScalability2": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale down empty nodes [Feature:ClusterAutoscalerScalability3] [Suite:k8s]", + "labels": { + "Feature:ClusterAutoscalerScalability3": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4] [Suite:k8s]", + "labels": { + "Feature:ClusterAutoscalerScalability4": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5] [Suite:k8s]", + "labels": { + "Feature:ClusterAutoscalerScalability5": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6] [Suite:k8s]", + "labels": { + "Feature:ClusterAutoscalerScalability6": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp] 
[Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should scale up GPU pool from 0 [GpuType:] [Feature:ClusterSizeAutoscalingGpu] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingGpu": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should scale up GPU pool from 1 [GpuType:] [Feature:ClusterSizeAutoscalingGpu] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingGpu": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should not scale GPU pool up if pod does not require GPUs [GpuType:] [Feature:ClusterSizeAutoscalingGpu] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingGpu": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should scale down GPU pool from 1 [GpuType:] [Feature:ClusterSizeAutoscalingGpu] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingGpu": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pods are pending due to host port conflict 
[Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should correctly scale down after a node is not needed when there is non autoscaled pool [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] 
should be able to scale down when rescheduling a pod is required and pdb allows for it [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining multiple pods one by one as dictated by pdb [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0 [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0 [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up when unprocessed pod is created and is going to be unschedulable [Feature:ClusterScaleUpBypassScheduler] [Suite:k8s]", + "labels": { + "Feature:ClusterScaleUpBypassScheduler": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when unprocessed pod is created and is going to be schedulable [Feature:ClusterScaleUpBypassScheduler] [Suite:k8s]", + "labels": { + "Feature:ClusterScaleUpBypassScheduler": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when unprocessed pod is created and scheduler is not specified to be bypassed [Feature:ClusterScaleUpBypassScheduler] [Suite:k8s]", + "labels": { + "Feature:ClusterScaleUpBypassScheduler": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] DNS horizontal autoscaling [Serial] [Slow] [KubeUp] [sig-cloud-provider-gcp] kube-dns-autoscaler should scale kube-dns pods when cluster size changed [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "KubeUp": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] DNS horizontal autoscaling kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation 
[Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod using Average Utilization for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods and verify decision stability [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 5 pods to 3 pods and then from 3 pods to 
1 pod and verify decision stability [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light [Slow] Should scale from 2 pods to 1 pod [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods on a busy application with an idle sidecar container [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should not scale up on a busy sidecar with an idle application [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) CustomResourceDefinition Should scale with a CRD targetRef [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for 
aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with short downscale stabilization window should scale down soon after the stabilization period [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with long upscale stabilization window should scale up only after the stabilization period [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale up [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale down [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale up no more than given number of Pods per minute [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale down no more than given number of Pods per minute [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale up no more than given percentage of current Pods per minute [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale down no more than given percentage of current Pods per minute [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range over two stabilization windows [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range with stabilization window and pod limit rate [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a NodePort service [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service and client is hostNetwork [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack should be able to preserve UDP traffic when initial unready endpoints get ready [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack proxy implementation should not be vulnerable to the invalid conntrack state bug [Privileged] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for the cluster [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for the cluster 
[Provider:GCE] [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should resolve DNS of partial qualified names for the cluster [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for services [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for pods for Hostname [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for pods for Subdomain [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for ExternalName services [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should support configurable pod DNS nameservers [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should support configurable pod resolv.conf [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should work with the pod containing more than 6 DNS search paths and longer than 256 search list characters [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS HostNetwork should 
resolve DNS of partial qualified names for services on hostNetwork pods with dnsPolicy: ClusterFirstWithHostNet [LinuxOnly] [Disabled:RebaseInProgress] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS HostNetwork spec.Hostname field is not silently ignored and is used for hostname for a Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS HostNetwork spec.Hostname field is silently ignored and the node hostname is used when hostNetwork is set to true for a Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS configMap nameserver Change stubDomain should be able to change stubDomain configuration [Slow] [Serial] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS configMap nameserver Forward PTR lookup should forward PTR records lookup to upstream nameserver [Slow] [Serial] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS configMap nameserver Forward external name lookup should forward externalname lookup to upstream nameserver [Slow] [Serial] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:PerformanceDNS] [Serial] Should answer DNS query for maximum number of services per cluster [Slow] [Suite:k8s]", + "labels": { + "Feature:PerformanceDNS": {}, + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should have ipv4 and ipv6 internal node ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create pod, add ipv6 and ipv4 ip to pod ips [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create pod, add ipv6 and ipv4 ip to host ips [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should be able 
to reach pod on ipv4 and ipv6 ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create a single stack service with cluster ip from primary service range [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create service with ipv4 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create service with ipv6 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create service with ipv4,v6 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create service with ipv6,v4 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: sctp [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for node-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for node-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for endpoint-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for endpoint-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should update endpoints: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should update endpoints: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for client IP based session affinity: http [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for client IP based session affinity: udp [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should be able to handle large requests: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should be able to handle large requests: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for service endpoints using hostNetwork [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should have Endpoints and EndpointSlices pointing to API Server [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should support creating EndpointSlice API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should support a Service with multiple ports specified in multiple EndpointSlices [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should support a Service with multiple endpoint IPs specified in multiple EndpointSlices [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSliceMirroring should mirror a custom Endpoints resource through create update and delete [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSliceMirroring should mirror a custom Endpoint with multiple subsets and same IP address [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] ClusterDns [Feature:Example] should create pod that uses dns [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Example": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-network] CVE-2021-29923 IPv4 Service Type ClusterIP with leading zeros should work interpreted as decimal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Ingress API should support creating Ingress API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass [Feature:Ingress] should set default value on new IngressClass [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Ingress": {}, + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass [Feature:Ingress] should not set default value if no default IngressClass [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Ingress": {}, + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass [Feature:Ingress] should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Ingress": {}, + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass [Feature:Ingress] should allow IngressClass to have Namespace-scoped parameters [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Ingress": {}, + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass API should support creating IngressClass API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] KubeProxy should set TCP CLOSE_WAIT timeout [Privileged] [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] KubeProxy should update metric for tracking accepted packets destined for localhost nodeports [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should work for type=LoadBalancer [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should only target nodes with endpoints [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should work from pods [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should handle updates to ExternalTrafficPolicy field [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking IPerf2 [Feature:Networking-Performance] should run iperf2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Networking-Performance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] NoSNAT Should be able to send traffic between Pods without SNAT [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Connectivity Pod Lifecycle should be able to connect from a Pod to a terminating Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Connectivity Pod Lifecycle should be able to connect to other Pod from a terminating Pod [Disabled:RebaseInProgress] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 should proxy logs on node with explicit kubelet port using proxy subresource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 should proxy logs on node using proxy subresource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 should proxy through a service and a pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 A set of valid responses are returned for both pod and service Proxy [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to change the type and ports of a TCP service [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to change the type and ports of a UDP service [Slow] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should only allow access from service loadbalancer source ranges [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should have session affinity work for LoadBalancer service with Local traffic policy [Slow] [LinuxOnly] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to switch session affinity for LoadBalancer service with Local traffic policy [Slow] [LinuxOnly] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should have session affinity work for LoadBalancer service with Cluster traffic policy [Slow] [LinuxOnly] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to switch session affinity for LoadBalancer service with Cluster traffic policy [Slow] [LinuxOnly] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should handle load balancer cleanup finalizer for service [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to create LoadBalancer Service without NodePort and change it [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on different nodes [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on the same nodes [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] 
[Skipped:ovirt] [Skipped:vsphere] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should not have connectivity disruption during rolling update with externalTrafficPolicy=Cluster [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should not have connectivity disruption during rolling update with externalTrafficPolicy=Local [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv4] [Skipped:Disconnected] [Skipped:Proxy] [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Networking-IPv4": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv6] [Experimental][LinuxOnly] [Disabled:Broken] [Skipped:Disconnected] [Skipped:Proxy] [Skipped:azure] [Suite:k8s]", + "labels": { + "Feature:Networking-IPv6": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should provider Internet connection for containers using DNS [Feature:Networking-DNS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Networking-DNS": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should provide unchanging, static URL paths for kubernetes api services [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should check kube-proxy urls [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for pod-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for pod-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for pod-Service: sctp [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for node-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for node-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for node-Service: sctp [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: sctp [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for multiple endpoint-Services with same selector [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should update endpoints: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should update endpoints: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should update nodePort: http [Slow] [Suite:k8s]", + 
"labels": { + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should support basic nodePort: udp functionality [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should update nodePort: udp [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for client IP based session affinity: http [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for client IP based session affinity: udp [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should be able to handle large requests: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should be able to handle large requests: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for service endpoints using hostNetwork [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should recreate its iptables rules if they are deleted [Disruptive] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should allow creating a Pod with an SCTP HostPort [LinuxOnly] [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should provide secure master service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should serve a basic endpoint from pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + 
"sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should serve multiport endpoints from pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be updated after adding or deleting ports [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should preserve source pod IP for traffic thru service cluster IP [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should allow pods to hairpin back to themselves through services [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to up and down services [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should work after the service has been recreated [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should work after restarting kube-proxy [Disruptive] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should work after restarting apiserver [Disruptive] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to create a functioning NodePort service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be possible to connect to a service via ExternalIP when the external IP is not assigned to a node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to update service type to NodePort listening on same port number but different protocols [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-network] Services should be able to change the type from ExternalName to ClusterIP [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to change the type from ExternalName to NodePort [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to change the type from ClusterIP to ExternalName [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to change the type from NodePort to ExternalName [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should prevent NodePort collisions [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should check NodePort out-of-range [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should release NodePorts on delete [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should create endpoints for unready pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should not be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is false [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should have session affinity work for NodePort service [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should have session affinity timeout work for NodePort service [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should implement service.kubernetes.io/service-proxy-name [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should implement service.kubernetes.io/headless [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be rejected when no endpoints exist [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be rejected for evicted pods (no endpoints exist) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should respect internalTrafficPolicy=Local Pod to Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should respect internalTrafficPolicy=Local Pod (hostNetwork: true) to Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should respect internalTrafficPolicy=Local Pod and Node, to Pod (hostNetwork: true) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should support externalTrafficPolicy=Local for type=NodePort [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fail health check node port if there are only terminating endpoints [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fallback to terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Cluster [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fallback to local terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Local [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fallback to terminating endpoints when there are no ready endpoints with externallTrafficPolicy=Cluster [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fallback to local terminating endpoints when there are no ready endpoints with externalTrafficPolicy=Local [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should find a service from listing all namespaces [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should test the lifecycle of an Endpoint [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should complete a service status lifecycle [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should delete a collection of services [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + 
"labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should serve endpoints on same port and different protocols [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should serve endpoints on same port and different protocol for internal traffic on Type LoadBalancer [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should allow creating a basic SCTP service with pod and endpoints [LinuxOnly] [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:ServiceCIDRs] should create Services and servce on different Service CIDRs [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:ServiceCIDRs": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Service endpoints latency should not be very high [Conformance] [Serial] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:Topology Hints] should distribute endpoints evenly [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:Topology Hints": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:Traffic Distribution] when Service has trafficDistribution=PreferClose should route traffic to an endpoint that is close to the client [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Traffic Distribution": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile specified on the pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile specified on the container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile specified in annotations [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-node] AppArmor load AppArmor profiles can disable an AppArmor profile, using unconfined [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] crictl should be able to run crictl on the node [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Events should be sent by kubelets and the scheduler about pods scheduling and running [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:Example] Liveness liveness pods should be automatically restarted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Example": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:Example] Secret should create a pod that reads a secret [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Example": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:Example] Downward API should create a pod that prints his name and namespace [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Example": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:GPUDevicePlugin] Sanity test for Nvidia Device should run nvidia-smi cli [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:GPUDevicePlugin": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:GPUDevicePlugin] Sanity test for Nvidia Device should run gpu based matrix multiplication [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:GPUDevicePlugin": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s. [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet host cleanup with volume mounts [HostCleanup] [Flaky] Host cleanup after disrupting NFS volume [NFS] after stopping the nfs-server and deleting the (sleeping) client pod, the NFS mount and the pod's UID directory should be removed. 
[Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet host cleanup with volume mounts [HostCleanup] [Flaky] Host cleanup after disrupting NFS volume [NFS] after stopping the nfs-server and deleting the (active) client pod, the NFS mount and the pod's UID directory should be removed. [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the error with an empty --query option [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the kubelet logs [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the kubelet logs for the current boot [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the last three lines of the kubelet logs [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the kubelet logs for the current boot with the pattern container [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the kubelet logs since the current date and time [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should 
return the Microsoft-Windows-Security-SPP logs [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the last three lines of the Microsoft-Windows-Security-SPP logs [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the Microsoft-Windows-Security-SPP logs with the pattern Health [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet [Serial] [Slow] regular resource usage tracking [Feature:RegularResourceUsageTracking] resource tracking for 0 pods per node [Suite:k8s]", + "labels": { + "Feature:RegularResourceUsageTracking": {}, + "Serial": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet [Serial] [Slow] regular resource usage tracking [Feature:RegularResourceUsageTracking] resource tracking for 100 pods per node [Suite:k8s]", + "labels": { + "Feature:RegularResourceUsageTracking": {}, + "Serial": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet [Serial] [Slow] experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking] resource tracking for 100 pods per node [Suite:k8s]", + "labels": { + "Feature:ExperimentalResourceUsageTracking": {}, + "Serial": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Mount propagation should propagate mounts within defined scopes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NodeProblemDetector [NodeFeature:NodeProblemDetector] should run without error [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "NodeFeature:NodeProblemDetector": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod garbage collector [Feature:PodGarbageCollector] [Slow] should handle the creation of 1000 pods [Suite:k8s]", + "labels": { + "Feature:PodGarbageCollector": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Serial] Pod InPlace Resize Container (scheduler-focused) [Feature:InPlacePodVerticalScaling] pod-resize-scheduler-tests 
[Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU \u0026 memory [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - decrease CPU \u0026 memory [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU \u0026 decrease memory [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - decrease CPU \u0026 increase memory [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2) ; decrease: CPU (c2), memory (c1,c3) [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory requests only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory limits only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory requests only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container 
[Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory limits only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU requests only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU limits only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU requests only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU limits only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU requests and limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU requests and limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU requests and increase CPU limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU requests and decrease CPU limits [Disabled:Alpha] 
[Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory requests and limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory requests and limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory requests and increase memory limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory requests and decrease memory limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU requests and increase memory limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU requests and decrease memory limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory requests and increase CPU limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory requests and decrease CPU limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + 
"Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests - decrease memory request [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU (NotRequired) \u0026 memory (RestartContainer) [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container - decrease CPU (RestartContainer) \u0026 memory (NotRequired) [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - increase c1 resources, no change for c2, decrease c3 resources (no net change for pod) [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - decrease c1 resources, increase c2 resources, no change for c3 (net increase for pod) [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - no change for c1, increase c2 resources, decrease c3 (net decrease for pod) [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] pod-resize-resource-quota-test [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] BestEffort pod - try requesting memory, expect error [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Delete Grace Period should be submitted and removed 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pods Set QOS Class should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod Container Status should never report success for a pending container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod Container Status should never report container start when an init container fails [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod Container lifecycle should not create extra sandbox if all containers are done [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod Container lifecycle evicted pods should be terminal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod TerminationGracePeriodSeconds is negative pod with negative grace period [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PreStop should call prestop when killing a pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PreStop graceful pod terminated should wait until preStop hook completes the process [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should reject a Pod requesting a RuntimeClass with conflicting node selector [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling with taints [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling without taints 
[Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context when if the container's primary UID belongs to some groups in the image [LinuxOnly] should add pod.Spec.SecurityContext.SupplementalGroups to them [LinuxOnly] in resultant supplementary groups for the container processes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context [sig-node] SupplementalGroupsPolicy [Feature:SupplementalGroupsPolicy] when SupplementalGroupsPolicy was not set if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SupplementalGroupsPolicy": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context [sig-node] SupplementalGroupsPolicy [Feature:SupplementalGroupsPolicy] when SupplementalGroupsPolicy was set to Merge if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SupplementalGroupsPolicy": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context [sig-node] SupplementalGroupsPolicy [Feature:SupplementalGroupsPolicy] when SupplementalGroupsPolicy was set to Strict even if the container's primary UID belongs to some groups in the image, it should not add SupplementalGroups to them [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SupplementalGroupsPolicy": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support container.SecurityContext.RunAsUser [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-node] Security Context should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support volume SELinux relabeling [Flaky] [LinuxOnly] [Suite:k8s]", + "labels": { + "Flaky": {}, + "LinuxOnly": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly] [Suite:k8s]", + "labels": { + "Flaky": {}, + "LinuxOnly": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly] [Suite:k8s]", + "labels": { + "Flaky": {}, + "LinuxOnly": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support seccomp unconfined on the container [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support seccomp unconfined on the pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support seccomp runtime/default [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support seccomp default which is unconfined [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] SSH should SSH to all nodes and run commands [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] evicts pods from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] doesn't evict pod with tolerations from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-node] NoExecuteTaintManager Single Pod [Serial] eventually evict pod with finite tolerations from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] removing taint cancels eviction [Disruptive] [Conformance] [Skipped:SingleReplicaTopology] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Disruptive": {}, + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] pods evicted from tainted nodes have pod disruption condition [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] only evicts pods without tolerations from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds [Disruptive] [Conformance] [Skipped:SingleReplicaTopology] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Disruptive": {}, + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSIInlineVolumes should support CSIVolumeSource in Pod API [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSIInlineVolumes should run through the lifecycle of a CSIDriver [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, 
+ "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default 
fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath 
should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes 
[Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI 
Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] 
multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should 
concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod 
recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": 
{} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod 
recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single 
volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV 
(ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] 
[Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: 
csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + 
}, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volume-lifecycle-performance should provision volumes at scale within performance constraints [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should preempt lower priority pods using ReadWriteOncePod volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "MinimumKubeletVersion:1.27": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should block a second pod from using an in-use ReadWriteOncePod volume on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "MinimumKubeletVersion:1.27": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] 
[Serial] [Testpattern: Inline-volume (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext4)] 
volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + 
"Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] 
volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] 
[Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, 
+ "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes 
[Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + 
"Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: 
Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to 
two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { 
+ "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] 
[Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + 
"LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + 
"labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume 
[Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) 
(late-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) 
[Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support 
snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volume-lifecycle-performance should provision volumes at scale within performance constraints [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should preempt lower priority pods using ReadWriteOncePod volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "MinimumKubeletVersion:1.27": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should block a second pod from using an in-use ReadWriteOncePod volume on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "MinimumKubeletVersion:1.27": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI 
Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSIStorageCapacity should support 
CSIStorageCapacities API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Feature:Flexvolumes] Detaching volumes should not work when mount is in progress [Slow] [Suite:k8s]", + "labels": { + "Feature:Flexvolumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] EmptyDir wrapper volumes should not conflict [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for configmaps [Serial] [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for git_repo [Serial] [Slow] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : secret [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : configmap [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : projected [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Flexvolumes should be mountable when non-attachable [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Flexvolumes should be mountable when attachable [Feature:Flexvolumes] [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Flexvolumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Feature:Flexvolumes] Mounted flexvolume expand [Slow] Should verify mounted flex volumes can be resized [Suite:k8s]", + "labels": { + 
"Feature:Flexvolumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Feature:Flexvolumes] Mounted flexvolume volume expand [Slow] should be resizable when mounted [Suite:k8s]", + "labels": { + "Feature:Flexvolumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a file written to the mount before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting non-existent directory 'does-not-exist-dir' when HostPathType is HostPathDirectory [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should be able to mount directory 'adir' successfully when HostPathType is HostPathDirectory [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should be able to mount directory 'adir' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathFile [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathSocket [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathCharDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathBlockDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting non-existent file 'does-not-exist-file' when HostPathType is HostPathFile [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should be able to mount file 'afile' successfully when HostPathType is HostPathFile [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should be able to mount file 'afile' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathDirectory [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathSocket [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathCharDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathBlockDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting non-existent socket 'does-not-exist-socket' when HostPathType is HostPathSocket [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should be able to mount socket 'asocket' successfully when HostPathType is HostPathSocket [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should be able to mount socket 'asocket' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": 
{} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathDirectory [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathFile [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathCharDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathBlockDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting non-existent character device 'does-not-exist-char-dev' when HostPathType is HostPathCharDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should be able to mount character device 'achardev' successfully when HostPathType is HostPathCharDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should be able to mount character device 'achardev' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathDirectory [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathFile [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathSocket [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is 
HostPathBlockDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting non-existent block device 'does-not-exist-blk-dev' when HostPathType is HostPathBlockDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathBlockDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathDirectory [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathFile [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathSocket [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathCharDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext4)] volumes 
should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + 
}, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] 
volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the 
volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default 
fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted 
[LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + 
"labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel 
[Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the 
same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] 
[Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + 
"labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] 
[Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod 
recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] 
volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] 
[Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume 
contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext3)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: 
Pre-provisioned PV (ext3)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext3)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext4)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default 
fs)] subPath should support non-existent path [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: 
Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] 
[LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV 
(filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} 
+ }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV 
(block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + 
"Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently 
access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to 
two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow 
expansion of pvcs without AllowVolumeExpansion property [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy 
(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for 
ephemeral pvcs [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, 
+ { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] 
[Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", 
+ "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should 
support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is 
outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + 
"labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": 
{ + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: 
Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single 
read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently 
access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] 
[Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": 
{ + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] 
[Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] 
volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] 
[LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", 
+ "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV 
(default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount 
[LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if 
subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX 
mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: 
Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block 
volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": 
{} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods 
should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume 
definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: 
Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath 
should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] 
[Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] 
[Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to 
two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod 
recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": 
{} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: 
Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume 
and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet 
is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] 
[Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext4)] volumes should store 
data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { 
+ "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": 
{} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + 
}, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the 
volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { 
+ "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before 
kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + 
"labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: 
Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV 
(default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: 
Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using 
directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] 
[Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should 
mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { 
+ "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to 
two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic 
PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data 
across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] 
[Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": 
{} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] 
ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should 
store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { 
+ "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] 
[LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + 
}, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} 
+ }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] 
[Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in 
parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + 
}, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] 
[Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] 
should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + 
"labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access 
to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": 
{}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should 
access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with 
mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath 
should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": 
{} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should 
fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] 
[LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc 
[Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { 
+ "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand 
should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly 
[Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the 
volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (xfs)] 
[Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod 
[LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + 
"Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should 
support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod 
is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: 
Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] 
[Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV 
(default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] 
[Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": 
{ + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume 
mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block 
volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first 
pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec 
of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted 
[LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": 
{}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV 
(default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": 
{}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV 
(default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to 
larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume 
mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two 
volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod 
created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs 
created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": 
{} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] 
volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] 
[Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + 
}, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the 
volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] 
subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] 
[Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV 
(block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] 
provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] 
[Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data 
across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should 
access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block 
volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should 
store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": 
{}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing 
directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support 
readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block 
volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: 
Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": 
{ + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV 
(block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} 
+ }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] 
[Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] 
[Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext3)] volumes 
should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default 
fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": 
{ + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: 
Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV 
(ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV 
(filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, 
+ "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node 
[Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem 
volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents 
ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of 
files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted 
while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: 
Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] 
[Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume 
from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", 
+ "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] 
[Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access 
the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents 
ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: 
Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support 
non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": 
{} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should be 
able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision 
storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] 
[Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV 
(filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same 
node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] 
[Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial 
fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: 
Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] 
[Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] 
[Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + 
"labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] 
[Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] 
provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned 
PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] 
[Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should 
access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod 
is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Mounted volume expand [Feature:StorageProvider] Should verify mounted devices can be resized [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:StorageProvider": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kube-controller-manager restarts should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a file written to the mount before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [LinuxOnly] NonGracefulNodeShutdown [NonGracefulNodeShutdown] pod that uses a persistent volume via gce pd driver should get immediately rescheduled to a different node after non graceful node shutdown [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:NodeOutOfServiceVolumeDetach": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-expansion loopback local block volume should support online expansion on node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods 
mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Skipped:gce] 
[Suite:k8s]", + "labels": { + "Flaky": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local 
volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to non-existent path [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to wrong node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeAffinity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeSelector [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes spread across nodes when pod has anti-affinity [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod has affinity [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes spread across nodes when pod management is parallel and pod has anti-affinity [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod management is parallel and pod has affinity [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Stress with local volumes [Serial] should be able to process many pods and reuse local volumes [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs should create a non-pre-bound PV and PVC: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and non-pre-bound PV: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
PersistentVolumes NFS with Single PV - PVC pairs create a PVC and a pre-bound PV: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV: test phase transition timestamp is set and phase is Available [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test phase transition timestamp is set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test phase transition timestamp multiple updates [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 2 PVs and 4 PVCs: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 3 PVs and 3 PVCs: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 4 PVs and 2 PVCs: test write access [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS when invoking the Recycle reclaim policy should test that a PV becomes Available and is clean after the PVC is deleted. 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes CSI Conformance should run through the lifecycle of a PV and a PVC [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes CSI Conformance should apply changes to a pv/pvc status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes Default StorageClass [LinuxOnly] pods that use multiple volumes should be reschedulable [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PV Protection Verify \"immediate\" deletion of a PV that is not bound to a PVC [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PV Protection Verify that PV bound to a PVC is not removed immediately [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PVC Protection Verify \"immediate\" deletion of a PVC that is not in active use by a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PVC Protection Verify that PVC in active use by a pod is not removed immediately [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PVC Protection Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Persistent Volume Claim and StorageClass Retroactive StorageClass assignment [Serial] [Disruptive] should assign default SC to PVCs that have no SC set [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] StaticPods [Feature:Kind] should run after kubelet stopped with CSI volume mounted [Disruptive] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Kind": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] StorageClasses CSI Conformance should run through the lifecycle of a StorageClass [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with secret pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod with mountPath of existing file [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with downward pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with projected pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Container restart should verify that container can restart successfully after configmaps modified [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] VolumeAttachment Conformance should run through the lifecycle of a VolumeAttachment [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create prometheus metrics for volume provisioning and attach/detach [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create prometheus metrics for volume provisioning errors [Slow] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", 
+ "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create volume metrics with the correct FilesystemMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create volume metrics with the correct BlockMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create metrics for total time taken in volume operations in P/V Controller [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create volume metrics in Volume Manager [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create metrics for total number of volumes in A/D Controller [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create prometheus metrics for volume provisioning and attach/detach [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create prometheus metrics for volume provisioning errors [Slow] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics with the correct FilesystemMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics with the correct BlockMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create metrics for total time taken in volume operations in P/V Controller [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics 
Ephemeral should create volume metrics in Volume Manager [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create metrics for total number of volumes in A/D Controller [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create none metrics for pvc controller before creating any PV or PVC [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create unbound pv count metrics for pvc controller after creating pv only [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create unbound pvc count metrics for pvc controller after creating pvc only [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create unbound pvc count metrics for pvc controller with volume attributes class dimension after creating pvc only [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create bound pv/pvc count metrics for pvc controller with volume attributes class dimension after creating both pv and pvc [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create total pv count metrics for with plugin and volume mode labels after creating pv [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should provision storage with different parameters [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:StorageProvider": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should provision storage with non-default reclaim policy Retain [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:StorageProvider": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should test that deleting a claim before the volume is provisioned deletes the volume. [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:StorageProvider": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] deletion should be idempotent [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:StorageProvider": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner External should let an external dynamic provisioner create and delete persistent volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should create and delete default persistent volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should be disabled by changing the default annotation [Serial] [Disruptive] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should be disabled by removing the default annotation [Serial] [Disruptive] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning Invalid AWS KMS key should report an error and create no PV [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] VolumeAttributesClass [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should run through the lifecycle of a 
VolumeAttributesClass [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Volumes ConfigMap should be mountable [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should not require VolumeAttach for drivers without attachment [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should require VolumeAttach for drivers with attachment [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should require VolumeAttach for ephemermal volume and drivers with attachment [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should preserve attachment policy when no CSIDriver present [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI CSIDriver deployment after pod creation using non-attachable mock driver should bringup pod after deploying CSIDriver attach=false [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock fsgroup as mount option Delegate FSGroup to CSI driver [LinuxOnly] should pass FSGroup to CSI driver if it is set in pod and driver supports VOLUME_MOUNT_GROUP [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock fsgroup as mount option Delegate FSGroup to CSI driver [LinuxOnly] should not pass FSGroup to CSI driver if it is set in pod and driver supports VOLUME_MOUNT_GROUP [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=default [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=File [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should not modify fsGroup if fsGroupPolicy=None [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from None to File [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from None to default [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should not update fsGroup if update from File to None [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from File to default [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should not update fsGroup if update from detault to None [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from detault to File [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Dynamic provisioning should honor pv delete reclaim policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:HonorPVReclaimPolicy": {}, + "FeatureGate:HonorPVReclaimPolicy": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver 
Dynamic provisioning should honor pv retain reclaim policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:HonorPVReclaimPolicy": {}, + "FeatureGate:HonorPVReclaimPolicy": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Static provisioning should honor pv delete reclaim policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:HonorPVReclaimPolicy": {}, + "FeatureGate:HonorPVReclaimPolicy": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Static provisioning should honor pv retain reclaim policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:HonorPVReclaimPolicy": {}, + "FeatureGate:HonorPVReclaimPolicy": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should call NodeUnstage after NodeStage success [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should retry NodeStage after NodeStage final error [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should retry NodeStage after NodeStage ephemeral error [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should call NodeUnstage after NodeStage ephemeral error [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should not call NodeUnstage after NodeStage final error [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] should call NodeStage after NodeUnstage success [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] two pods: should call NodeStage after previous NodeUnstage final 
error [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] two pods: should call NodeStage after previous NodeUnstage transient error [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should pass SELinux mount option for RWOP volume and Pod with SELinux context set [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should add SELinux mount option to existing mount options [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for RWO volume with SELinuxMount disabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "Feature:SELinuxMountReadWriteOncePodOnly": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should pass SELinux mount option for RWO volume with SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for Pod without SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for CSI driver that does not support SELinux mount [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not unstage RWOP volume when starting a second pod with the same SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should unstage RWOP volume when starting a second pod with different SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not unstage RWO volume when starting a second pod with the same SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should unstage RWO volume when starting a second pod with different SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] warning is not bumped on two Pods with the same context on RWO volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "Feature:SELinuxMountReadWriteOncePodOnly": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] warning is bumped on two Pods with a different context on RWO volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "Feature:SELinuxMountReadWriteOncePodOnly": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two Pods with the same context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different context on RWX volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different context on RWOP volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should not be plumbed down when csiServiceAccountTokenEnabled=false [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should not be plumbed down when CSIDriver is not deployed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should be plumbed down when csiServiceAccountTokenEnabled=true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume snapshot CSI Volume Snapshots [Feature:VolumeSnapshotDataSource] volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume snapshot CSI Volume Snapshots secrets [Feature:VolumeSnapshotDataSource] volume snapshot create/delete with secrets [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume snapshot CSI Snapshot Controller metrics [Feature:VolumeSnapshotDataSource] snapshot controller should emit dynamic CreateSnapshot, CreateSnapshotAndReady, and DeleteSnapshot metrics [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume snapshot CSI Snapshot Controller metrics [Feature:VolumeSnapshotDataSource] snapshot controller should emit pre-provisioned CreateSnapshot, CreateSnapshotAndReady, and DeleteSnapshot metrics [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity storage capacity unlimited [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, immediate binding [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, late binding, no topology [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, late binding, with topology [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity unused [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity disabled [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, no capacity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, insufficient capacity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, have capacity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume without restarting pod if nodeExpansion=off [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume by restarting pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should not have staging_path missing in node expand volume pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume by restarting pod if attach=off, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should not expand volume if resizingOnDriver=off, resizingOnSC=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
CSI Mock volume expansion CSI online volume expansion with secret should expand volume without restarting pod if attach=on, nodeExpansion=on, csiNodeExpandSecret=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI online volume expansion should expand volume without restarting pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI online volume expansion should expand volume without restarting pod if attach=off, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should record target size in allocated resources [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:RecoverVolumeExpansionFailure": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should allow recovery if controller expansion fails with final error [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:RecoverVolumeExpansionFailure": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should not be possible in partially expanded volumes [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:RecoverVolumeExpansionFailure": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit when limit is bigger than 0 [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit for generic ephemeral volume when persistent volume is attached [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit for persistent volume when generic ephemeral volume is attached [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI workload information using mock driver should be passed when 
podInfoOnMount=true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI workload information using mock driver contain ephemeral=true when using inline volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI workload information using mock driver should not be passed when podInfoOnMount=false [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI workload information using mock driver should not be passed when CSIDriver does not exist [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI PodInfoOnMount Update should not be passed when update from true to false [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI PodInfoOnMount Update should be passed when update from false to true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] stateful Upgrade [Feature:StatefulUpgrade] stateful upgrade should maintain a functioning cluster [Disabled:Unimplemented] [Suite:k8s]", + "labels": { + "Feature:StatefulUpgrade": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-auth] ServiceAccount admission controller migration [Feature:BoundServiceAccountTokenVolume] master upgrade should maintain a functioning cluster [Disabled:Unimplemented] [Suite:k8s]", + "labels": { + "Feature:BoundServiceAccountTokenVolume": {}, + "sig-auth": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] kube-proxy migration [Feature:KubeProxyDaemonSetMigration] Upgrade kube-proxy from static pods to a DaemonSet should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade] [Disabled:Unimplemented] [Suite:k8s]", + "labels": { + "Feature:KubeProxyDaemonSetMigration": {}, + "Feature:KubeProxyDaemonSetUpgrade": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] kube-proxy migration [Feature:KubeProxyDaemonSetMigration] Downgrade kube-proxy from a DaemonSet to static pods should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade] [Disabled:Unimplemented] [Suite:k8s]", + "labels": { + "Feature:KubeProxyDaemonSetDowngrade": {}, + "Feature:KubeProxyDaemonSetMigration": {}, + 
"sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Upgrade [Feature:Upgrade] master upgrade should maintain a functioning cluster [Feature:MasterUpgrade] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:MasterUpgrade": {}, + "Feature:Upgrade": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Upgrade [Feature:Upgrade] cluster upgrade should maintain a functioning cluster [Feature:ClusterUpgrade] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:ClusterUpgrade": {}, + "Feature:Upgrade": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Downgrade [Feature:Downgrade] cluster downgrade should maintain a functioning cluster [Feature:ClusterDowngrade] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:ClusterDowngrade": {}, + "Feature:Downgrade": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] GKE node pools [Feature:GKENodePool] should create a cluster with multiple node pools [Feature:GKENodePool] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:GKENodePool": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas same zone [Serial] [Disruptive] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:HAMaster": {}, + "Serial": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas different zones [Serial] [Disruptive] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:HAMaster": {}, + "Serial": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas multizone workers [Serial] [Disruptive] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:HAMaster": {}, + "Serial": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to the readonly kubelet port 10255 using proxy subresource [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:KubeletSecurity": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to cadvisor port 4194 using proxy subresource [Disabled:SpecialConfig] 
[Suite:k8s]", + "labels": { + "Feature:KubeletSecurity": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not have port 10255 open on its all public IP addresses [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:KubeletSecurity": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not have port 4194 open on its all public IP addresses [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:KubeletSecurity": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] [Disruptive] NodeLease NodeLease deletion node lease should be deleted when corresponding node is deleted [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by ordering clean reboot and ensure they function upon restart [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by ordering unclean reboot and ensure they function upon restart [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by triggering kernel panic and ensure they function upon restart [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by switching off the network interface and ensure they function upon switch on [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by dropping all inbound packets for a while and ensure they function afterwards [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by dropping all outbound packets for a 
while and ensure they function afterwards [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Nodes [Disruptive] Resize [Slow] should be able to delete nodes [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Nodes [Disruptive] Resize [Slow] should be able to add nodes [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Restart [Disruptive] [KubeUp] should restart all nodes and ensure all nodes and pods recover [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "KubeUp": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider] [Feature:CloudProvider] [Disruptive] Nodes should be deleted on API server if it doesn't exist in the cloud provider [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:CloudProvider": {}, + "sig-cloud-provider": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Cpu Resources [Serial] Container limits should not be exceeded after waiting 2 minutes [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Density [Serial] [Slow] create a batch of pods latency/resource should be within limit when create 10 pods with 0s interval [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:GPUDevicePlugin] Device Plugin should be able to create a functioning device plugin for Windows [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:GPUDevicePlugin": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] DNS should support configurable pod DNS servers [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Eviction [Serial] [Slow] [Disruptive] should evict a pod when a node experiences memory pressure [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] GMSA Full [Serial] [Slow] GMSA support works end to end [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] GMSA Full [Serial] [Slow] GMSA support can read and write file to remote SMB folder [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] GMSA Kubelet [Slow] kubelet GMSA support when creating a pod with correct GMSA credential specs passes the credential specs down to the Pod's containers [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should run as a process on the host/node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support init containers [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers container command path validation [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support various volume mount types [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers metrics should report count of started and failed to start HostProcess containers [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers container stats validation [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": 
{} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support querying api-server using in-cluster config [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should run as localgroup accounts [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] Hybrid cluster network for all supported CNIs should have stable networking for Linux and Windows pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] Hybrid cluster network for all supported CNIs should provide Internet connection for Linux containers [Feature:Networking-IPv4] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Networking-IPv4": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] Hybrid cluster network for all supported CNIs should provide Internet connection and DNS for Windows containers [Feature:Networking-IPv4] [Feature:Networking-DNS] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Networking-DNS": {}, + "Feature:Networking-IPv4": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHyperVContainers] HyperV containers should start a hyperv isolated container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHyperVContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Kubelet-Stats [Serial] Kubelet stats collection for Windows nodes when running 10 pods should return within 10 seconds [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Kubelet-Stats Kubelet stats collection for Windows nodes when windows is booted should return bootid within 10 seconds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Kubelet-Stats Kubelet stats collection for Windows nodes when running 3 pods should return within 10 seconds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + 
"sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Memory Limits [Serial] [Slow] Allocatable node memory should be equal to a calculated allocatable memory value [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Memory Limits [Serial] [Slow] attempt to deploy past allocatable memory limits should fail deployments of pods once there isn't enough memory [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletVersion:1.22] RebootHost containers [Serial] [Disruptive] [Slow] should run as a reboot process on the host/node [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should be able create pods and run containers with a given username [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with unknown usernames at Pod level [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with unknown usernames at Container level [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should override SecurityContext username if set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should ignore Linux Specific SecurityContext if set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with containers running as ContainerAdministrator when runAsNonRoot is true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with containers running as CONTAINERADMINISTRATOR when runAsNonRoot is true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should be able to create pod and run containers [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] Services should be able to create a functioning NodePort service for Windows [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Windows volume mounts check volume mount permissions container should have readOnly permissions on emptyDir [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Windows volume mounts check volume mount permissions container should have readOnly permissions on hostMapPath [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AggregatedDiscovery should support raw aggregated discovery endpoint Accept headers [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AggregatedDiscovery should support raw aggregated discovery request for CRDs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AggregatedDiscovery should support aggregated discovery interface [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AggregatedDiscovery should support aggregated discovery interface for CRDs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Aggregator Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + 
"sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] kube-apiserver identity [Feature:APIServerIdentity] kube-apiserver identity should persist after restart [Disruptive] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:APIServerIdentity": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should create an applied object if it does not already exist [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should work for subresources [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should remove a field if it is owned but removed in the apply request [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should not remove a field if an owner unsets the field but other managers still have ownership of the field [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should ignore conflict errors if force apply is used [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should work for CRDs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should give up ownership of a field if forced applied by a controller [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for API chunking should return chunks of results for list calls [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for API chunking should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert from CR v1 to CR v2 [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert a non homogeneous list of CRs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD with validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD without validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields at the schema root [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields in an embedded object [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of different groups [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group but different versions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group and version but different kinds [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] updates the published spec when one version gets renamed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] removes definition from spec when one version gets changed to not be served [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] [Flaky] kubectl explain works for CR with the same resource name as built-in object. [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceFieldSelectors [Privileged:ClusterAdmin] [FeatureGate:CustomResourceFieldSelectors] [Beta] CustomResourceFieldSelectors MUST list and watch custom resources matching the field selector [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CustomResourceFieldSelectors": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST NOT fail to update a resource due to JSONSchema errors on unchanged correlatable fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to JSONSchema errors on unchanged uncorrelatable fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to JSONSchema errors on changed fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST NOT fail to update a resource due to CRD Validation Rule errors on unchanged correlatable fields [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to CRD Validation Rule errors on unchanged uncorrelatable fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to CRD Validation Rule errors on changed fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST NOT ratchet errors raised by transition rules [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST evaluate a CRD Validation Rule with oldSelf = nil for new values when optionalOldSelf is true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST NOT fail validation for create of a custom resource that satisfies the x-kubernetes-validations rules [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail validation for create of a custom resource that does not satisfy the x-kubernetes-validations rules [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains a x-kubernetes-validations rule that refers to a property that do not exist [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules 
[Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that contains a syntax error [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that exceeds the estimated cost limit [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource that exceeds the runtime cost limit for x-kubernetes-validations rule execution [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail update of a custom resource that does not satisfy a x-kubernetes-validations transition rule [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch watch on custom resource definition objects [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition creating/deleting custom resource definition objects works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition listing custom resource definition objects works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition getting/updating/patching custom resource definition status sub-resource works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] should include custom resource definition resources in discovery 
documents [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] custom resource defaulting for requests and from storage works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Discovery should accurately determine present and missing resources [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Discovery Custom resource should have storage version hash [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Discovery should validate PreferredVersion for each APIGroup [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Discovery should locate the groupVersion and a resource within each APIGroup [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Etcd failure [Disruptive] should recover from network partition with master [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Etcd failure [Disruptive] should recover from SIGKILL [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should detect unknown and duplicate fields of a typed object [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should detect unknown metadata fields of a typed object [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should create/apply a valid CR for CRD with validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + 
"Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should create/apply a CR with unknown fields for CRD with no validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should create/apply an invalid CR with extra properties for CRD with validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should detect unknown metadata fields in both the root and embedded object of a CR [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should detect duplicates in a CR when preserving unknown fields [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (priority) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (fairness) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should support FlowSchema API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should support PriorityLevelConfiguration API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should delete pods created by rc 
when not orphaning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should orphan pods created by rc if delete options say so [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should delete RS created by deployment when not orphaning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should not be blocked by dependency circle [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should support cascading deletion of custom resources [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should support orphan deletion of custom resources [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-api-machinery] Garbage collector should delete jobs and pods created by cronjob [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Generated clientset should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Generated clientset should create v1 cronJobs, delete cronJobs, watch cronJobs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] health handlers should contain necessary checks [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should ensure that all pods are removed when a namespace is deleted [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should ensure that all services are removed when a namespace is deleted [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should delete fast enough (90 percent of 100 namespaces in 150 seconds) [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:ComprehensiveNamespaceDraining": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should patch a Namespace [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should apply changes to a namespace status [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should apply an update to a Namespace [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should apply a finalizer to a Namespace [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] OpenAPIV3 should round trip OpenAPI V3 for all built-in group versions [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] OpenAPIV3 should publish OpenAPI V3 for CustomResourceDefinition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] OpenAPIV3 should contain OpenAPI V3 for Aggregated APIServer [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/json\" [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/vnd.kubernetes.protobuf\" [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/vnd.kubernetes.protobuf,application/json\" [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/json,application/vnd.kubernetes.protobuf\" [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Server request timeout should return HTTP status code 400 if the user specifies an invalid timeout in the request URL [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Server request timeout the request 
should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Server request timeout default timeout should be used if the specified timeout in the request URL is 0s [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a service. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a secret. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a pod. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a configMap. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replication controller. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replica set. 
[Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a ResourceClaim [Feature:DynamicResourceAllocation] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim with a storage class [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a custom resource. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should be able to update and delete ResourceQuota. 
[Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should manage the lifecycle of a ResourceQuota [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should apply changes to a resourcequota status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope using scope-selectors. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes through scope selectors. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes. 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn). [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists). [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] server version should find the server version [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] StorageVersion resources [Feature:StorageVersionAPI] storage version with non-existing id should be GC'ed [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:StorageVersionAPI": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for Table transformation should return pod details [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for Table transformation should return chunks of table results for list calls [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for Table transformation should return generic metadata details across all namespaces for nodes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + 
}, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for Table transformation should return a 406 for a backend which does not implement metadata [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should validate against a Deployment [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should type check validation expressions [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should allow expressions to refer variables. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should type check a CRD [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should support ValidatingAdmissionPolicy API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should support ValidatingAdmissionPolicyBinding API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should observe add, update, and delete watch notifications on configmaps [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should be able to start watching from a specific resource version [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should be able to 
restart watching from the last resource version observed by the previous watch [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should observe an object deletion if it stops meeting the requirements of the selector [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should receive events on concurrent watches in same order [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API Streaming (aka. WatchList) [Serial] [Feature:WatchList] should be requested by informers when WatchListClient is enabled [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:WatchList": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API Streaming (aka. WatchList) [Serial] [Feature:WatchList] should be requested by client-go's List method when WatchListClient is enabled [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:WatchList": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API Streaming (aka. 
WatchList) [Serial] [Feature:WatchList] should be requested by dynamic client's List method when WatchListClient is enabled [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:WatchList": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should include webhook resources in discovery documents [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny pod and configmap creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny attaching pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny custom resource creation, update and deletion [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should unconditionally reject operations on fail closed webhook [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate configmap [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate pod and apply defaults after mutation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom 
resource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should deny crd creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with different stored version [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with pruning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should honor timeout [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a validating webhook should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a mutating webhook should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to create and update validating webhook configurations with match conditions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + 
"sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to create and update mutating webhook configurations with match conditions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should reject validating webhook configurations with invalid match conditions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should reject mutating webhook configurations with invalid match conditions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate everything except 'skip-me' configmaps [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl delete interactive based on user confirmation input [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl exec should be able to execute 1000 times in a container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl logs logs should be able to retrieve and filter logs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl logs default container logs the second container is the default-container by annotation should log default container if not specified [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from all pods based on default container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from 
each pod and each container in Deployment [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Update Demo should create and stop a replication controller [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Guestbook application should create and stop a working application [Conformance] [Slow] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support exec [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support exec using resource/name [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support exec through an HTTP proxy [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support exec through kubectl proxy [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a successful command [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a failing command [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should return command exit codes should support port-forward [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should return command exit codes should handle in-cluster config [Disabled:Broken] 
[Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run running a successful command [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run running a failing command [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command without --restart=Never [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command without --restart=Never, but with --rm [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command with --leave-stdin-open [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support inline execution and attach [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support inline execution and attach with websockets or fallback to spdy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should contain last line of the log [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl get componentstatuses should get componentstatuses [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl prune with applyset should apply and prune objects [Disabled:RebaseInProgress] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl 
client Kubectl apply should apply a new configuration to an existing RC [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl apply should reuse port when apply to an existing SVC [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl apply apply set/view last-applied [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should create/apply a CR with unknown fields for CRD with no validation schema [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should create/apply a valid CR for CRD with validation schema [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields in both the root and embedded object of a CR [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields of a typed object [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": 
{} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl cluster-info dump should check if cluster-info dump succeeds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for cronjob [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Proxy server should support proxy 
with --port 0 [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl taint [Serial] should update the taint on a node [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl taint [Serial] should remove all the taints with the same key off a node [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl events should show event when pod is created [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl create quota should create a quota without scopes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl create quota should create a quota with scopes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl create quota should reject quota with invalid scopes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client kubectl wait should ignore not found error with --for=delete [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client kubectl subresource flag should not be used in a bulk GET [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client kubectl subresource flag GET on status subresource of built-in type (node) returns identical info as GET on the built-in type [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a 
client that connects, sends NO DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects NO client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends NO DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects NO client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl rollout undo undo should rollback and update deployment env [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + } +] \ No newline at end of file diff --git a/.snyk b/.snyk new file mode 100644 index 0000000000000..dd23598634792 --- /dev/null +++ b/.snyk @@ -0,0 +1,9 @@ +# References: +# https://docs.snyk.io/scan-applications/snyk-code/using-snyk-code-from-the-cli/excluding-directories-and-files-from-the-snyk-code-cli-test +# https://docs.snyk.io/snyk-cli/commands/ignore +exclude: + global: + - 
"**/vendor/**" + - "**/*_test.go" + - "**/testdata/**" + - "**/cluster/**" diff --git a/DOWNSTREAM_OWNERS b/DOWNSTREAM_OWNERS new file mode 100644 index 0000000000000..ad48a46ecdd6d --- /dev/null +++ b/DOWNSTREAM_OWNERS @@ -0,0 +1,32 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +filters: + ".*": + # Downstream reviewers, don't have to match those in OWNERS + reviewers: + - bertinatto + - deads2k + - jerpeter1 + - p0lyn0mial + - soltysh + - tkashem + + # Approvers are limited to the team that manages rebases and pays the price for carries that are introduced + approvers: + - bertinatto + - deads2k + - jerpeter1 + - p0lyn0mial + - soltysh + - tkashem + + "^\\.go.(mod|sum)$": + labels: + - "vendor-update" + "^vendor/.*": + labels: + - "vendor-update" + "^staging/.*": + labels: + - "vendor-update" +component: kube-apiserver diff --git a/LICENSES/vendor/github.com/Azure/go-ntlmssp/LICENSE b/LICENSES/vendor/github.com/Azure/go-ntlmssp/LICENSE new file mode 100644 index 0000000000000..5a2939deb7f46 --- /dev/null +++ b/LICENSES/vendor/github.com/Azure/go-ntlmssp/LICENSE @@ -0,0 +1,25 @@ += vendor/github.com/Azure/go-ntlmssp licensed under: = + +The MIT License (MIT) + +Copyright (c) 2016 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + += vendor/github.com/Azure/go-ntlmssp/LICENSE 7f7cc56311d298677f304d0ffce374d8 diff --git a/LICENSES/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE b/LICENSES/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE new file mode 100644 index 0000000000000..055d875bb132a --- /dev/null +++ b/LICENSES/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE @@ -0,0 +1,26 @@ += vendor/github.com/go-asn1-ber/asn1-ber licensed under: = + +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2016 go-asn1-ber Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + += vendor/github.com/go-asn1-ber/asn1-ber/LICENSE db7dfd3c609df968396fa379c3851eb5 diff --git a/LICENSES/vendor/github.com/go-ldap/ldap/v3/LICENSE b/LICENSES/vendor/github.com/go-ldap/ldap/v3/LICENSE new file mode 100644 index 0000000000000..fca294a60931e --- /dev/null +++ b/LICENSES/vendor/github.com/go-ldap/ldap/v3/LICENSE @@ -0,0 +1,26 @@ += vendor/github.com/go-ldap/ldap/v3 licensed under: = + +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2016 go-ldap Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + += vendor/github.com/go-ldap/ldap/v3/LICENSE c3fcb38ec828f70d87d00a1c64cd9c2b diff --git a/LICENSES/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE b/LICENSES/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE new file mode 100644 index 0000000000000..f67fc367d04a2 --- /dev/null +++ b/LICENSES/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE @@ -0,0 +1,205 @@ += vendor/github.com/openshift-eng/openshift-tests-extension licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE 86d3f3a95c324c9479bd8986968f4327 diff --git a/LICENSES/vendor/github.com/openshift/api/LICENSE b/LICENSES/vendor/github.com/openshift/api/LICENSE new file mode 100644 index 0000000000000..6ab709a3c9d6c --- /dev/null +++ b/LICENSES/vendor/github.com/openshift/api/LICENSE @@ -0,0 +1,195 @@ += vendor/github.com/openshift/api licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2020 Red Hat, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/openshift/api/LICENSE 7ec75f465e2f8bee9a597f4b9d2921ba diff --git a/LICENSES/vendor/github.com/openshift/apiserver-library-go/LICENSE b/LICENSES/vendor/github.com/openshift/apiserver-library-go/LICENSE new file mode 100644 index 0000000000000..8c1731abc5e34 --- /dev/null +++ b/LICENSES/vendor/github.com/openshift/apiserver-library-go/LICENSE @@ -0,0 +1,205 @@ += vendor/github.com/openshift/apiserver-library-go licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/openshift/apiserver-library-go/LICENSE 86d3f3a95c324c9479bd8986968f4327 diff --git a/LICENSES/vendor/github.com/openshift/client-go/LICENSE b/LICENSES/vendor/github.com/openshift/client-go/LICENSE new file mode 100644 index 0000000000000..817bdf23d1ad9 --- /dev/null +++ b/LICENSES/vendor/github.com/openshift/client-go/LICENSE @@ -0,0 +1,195 @@ += vendor/github.com/openshift/client-go licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Red Hat, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/openshift/client-go/LICENSE 27bdb051f43ea9035ac160542145b43e diff --git a/LICENSES/vendor/github.com/openshift/library-go/LICENSE b/LICENSES/vendor/github.com/openshift/library-go/LICENSE new file mode 100644 index 0000000000000..9d7564fc4c5ea --- /dev/null +++ b/LICENSES/vendor/github.com/openshift/library-go/LICENSE @@ -0,0 +1,205 @@ += vendor/github.com/openshift/library-go licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/openshift/library-go/LICENSE 86d3f3a95c324c9479bd8986968f4327 diff --git a/LICENSES/vendor/go.uber.org/atomic/LICENSE b/LICENSES/vendor/go.uber.org/atomic/LICENSE new file mode 100644 index 0000000000000..d7259a2862c0f --- /dev/null +++ b/LICENSES/vendor/go.uber.org/atomic/LICENSE @@ -0,0 +1,23 @@ += vendor/go.uber.org/atomic licensed under: = + +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + += vendor/go.uber.org/atomic/LICENSE.txt 1caee86519456feda989f8a838102b50 diff --git a/LICENSES/vendor/gopkg.in/yaml.v2/LICENSE b/LICENSES/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 0000000000000..88be4ca082d37 --- /dev/null +++ b/LICENSES/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,205 @@ += vendor/gopkg.in/yaml.v2 licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/gopkg.in/yaml.v2/LICENSE e3fc50a88d0a364313df4b21ef20c29e diff --git a/README.openshift.md b/README.openshift.md new file mode 100644 index 0000000000000..b04871fc09c0f --- /dev/null +++ b/README.openshift.md @@ -0,0 +1,73 @@ +# OpenShift's fork of k8s.io/kubernetes + +This repository contains core Kubernetes components with OpenShift-specific patches. + +## Cherry-picking an upstream commit into openshift/kubernetes: Why, how, and when. + +`openshift/kubernetes` carries patches on top of each rebase in one of two ways: + +1. *periodic rebases* against an upstream Kubernetes tag. Eventually, +any code you have in upstream Kubernetes will land in OpenShift via +this mechanism. + +2. Cherry-picked patches for important *bug fixes*. We try hard to +avoid back-porting features entirely. Unless there are exceptional circumstances, your backport should at least be merged into the Kubernetes master branch.
With every carry patch (not included in upstream) you are introducing a maintenance burden for the team managing rebases. + +### For OpenShift newcomers: Pick my Kubernetes fix into OpenShift vs. wait for the next rebase? + +Assuming you have read the bullets above: if your patch is really far behind (for example, if there have been 5 commits modifying the directory you care about), cherry-picking will be increasingly difficult and you should consider waiting for the next rebase, which will likely include the commit you care about or at least decrease the number of cherry-picks you need to do to merge. + +To really know the answer, you often need to know *how many commits behind you are in +a particular directory*. + +To do this, just use `git log`, like so (using pkg/scheduler/ as an example): + +``` +MYDIR=pkg/scheduler/algorithm +git log --oneline -- ${MYDIR} | grep UPSTREAM | cut -d' ' -f 4-10 | head -1 +``` + +The commit message printed above will tell you the LAST commit in Kubernetes that +affected the directory ("/pkg/scheduler/algorithm"), which will give you an intuition about how "hot" the code you are +cherry-picking is. If it has changed a lot recently, you +probably want to wait for a rebase to land. + +### Cherry-picking an upstream change + +Since `openshift/kubernetes` closely resembles `k8s.io/kubernetes`, +cherry-picking largely involves proposing upstream commits in a PR to our +downstream fork. Other than the usual potential for merge conflicts, the +commit messages for all commits proposed to `openshift/kubernetes` must +reflect the following: + +- `UPSTREAM: <UPSTREAM PR number>:` The prefix for upstream commits to ensure + correct handling during a future rebase. The person performing the rebase + will know to omit a commit with this prefix if the referenced PR is already + present in the new base history. +- `UPSTREAM: <drop>:` The prefix for downstream commits of code that is + generated (i.e. via `make update`) or that should not be retained by the + next rebase. +- `UPSTREAM: <carry>:` The prefix for downstream commits that maintain + downstream-specific behavior (i.e. to ensure an upstream change is + compatible with OpenShift). Commits with this prefix are usually retained across + rebases. + +## Updating openshift/kubernetes to a new upstream release + +Instructions for rebasing `openshift/kubernetes` are maintained in a [separate +document](REBASE.openshift.md). + +## RPM Packaging + +A specfile is included in this repo which can be used to produce RPMs +including the openshift binary. While the specfile will be kept up to +date with build requirements, the version is not updated. Building the +rpm with the `openshift-hack/build-rpms.sh` helper script will ensure +that the version is set correctly. diff --git a/REBASE.openshift.md b/REBASE.openshift.md new file mode 100644 index 0000000000000..68e185b3342bb --- /dev/null +++ b/REBASE.openshift.md @@ -0,0 +1,536 @@ +# Maintaining openshift/kubernetes + +OpenShift is based on upstream Kubernetes. With every release of Kubernetes that is +intended to be shipped as OCP, it is necessary to incorporate the upstream changes +while ensuring that our downstream customizations are maintained. + +## Rebasing for releases < 4.6 + +The instructions in this document apply to OpenShift releases 4.6 and +above. For previous releases, please see the [rebase +enhancement](https://github.com/openshift/enhancements/blob/master/enhancements/rebase.md).
+ +## Maintaining this document + +An openshift/kubernetes rebase is a complex process involving many manual and +potentially error-prone steps. If, while performing a rebase, you find areas where +the documented procedure is unclear or missing detail, please update this document +and include the change in the rebase PR. This will ensure that the instructions are +as comprehensive and accurate as possible for the person performing the next +rebase. + +## Rebase Checklists + +The checklists provided below highlight the key responsibilities of +someone performing an openshift/kubernetes rebase. + +In preparation for submitting a PR to the [openshift fork of +kubernetes](https://github.com/openshift/kubernetes), the following +should be true: + +- [ ] The new rebase branch has been created from the upstream tag +- [ ] The new rebase branch includes relevant carries from the target branch +- [ ] Dependencies have been updated +- [ ] The hyperkube Dockerfile version has been updated +- [ ] `make update` has been invoked and the results committed +- [ ] `make` executes without error +- [ ] `make verify` executes without error +- [ ] `make test` executes without error +- [ ] The upstream tag is pushed to `openshift/kubernetes` to ensure that + build artifacts are versioned correctly + - Upstream tooling uses the value of the most recent tag (e.g. `v1.25.0`) + in the branch history as the version of the binaries it builds. + - Pushing the tag is as easy as: +``` +git push git@github.com:openshift/kubernetes.git refs/tags/v1.25.0 +``` + +Details to include in the description of the PR: + +- [ ] A link to the rebase spreadsheet for the benefit of reviewers + +After the rebase PR has merged to `openshift/kubernetes`, vendor the changes +into `openshift/origin` to ensure that the openshift-tests binary reflects +the upstream test changes introduced by the rebase: + +- [ ] Find the SHA of the merge commit after your PR lands in `openshift/kubernetes` +- [ ] Run `hack/update-kube-vendor.sh <SHA>` in a clone of the `origin` + repo and commit the results +- [ ] Run `make update` and commit the results +- [ ] Submit as a PR to `origin` + +As a final step, send an email to the aos-devel mailing list announcing the +rebase. Make sure to include: + +- [ ] The new version of upstream Kubernetes that OpenShift is now based on +- [ ] Link(s) to upstream changelog(s) detailing what has changed since the last rebase landed +- [ ] A reminder to component maintainers to bump their dependencies +- [ ] Relevant details of the challenges involved in landing the rebase that + could benefit from a wider audience. + +## Getting started + +Before incorporating upstream changes you may want to: + +- Read this document +- Get familiar with tig (text-mode interface for git) +- Find the best tool for resolving merge conflicts +- Use the diff3 conflict resolution strategy + (https://blog.nilbus.com/take-the-pain-out-of-git-conflict-resolution-use-diff3/) + +## Send email announcing you're starting work + +To spread the information more widely, send the following email: + +``` +Title: k8s bump is starting... + +I'm starting the process of updating our fork to bring in +the latest available version of kubernetes. This means that +every PR landing in openshift/kubernetes should go through +extra scrutiny, and only two exceptions allow merging PRs for +the time being: +1. High priority backports which require landing master first +to start the backport process. +2. Critical PRs unblocking the org.
+In both cases make sure to reach out to me for final approval. + +There is no ETA yet, but feel free to reach out to me with +any questions. +``` + +## Preparing the local repo clone + +Clone from a personal fork of kubernetes via a pushable (ssh) url: + +``` +git clone git@github.com:<user id>/kubernetes +``` + +Add a remote for upstream and fetch its branches: + +``` +git remote add --fetch upstream https://github.com/kubernetes/kubernetes +``` + +Add a remote for the openshift fork and fetch its branches: + +``` +git remote add --fetch openshift https://github.com/openshift/kubernetes +``` + +## Creating a new local branch for the new rebase + +- Branch the target `k8s.io/kubernetes` release tag (e.g. `v1.25.0`) to a new + local branch + +``` +git checkout -b rebase-1.25.0 v1.25.0 +``` + +- Merge the `openshift/master` branch into the `rebase-1.25.0` branch with merge + strategy `ours`. It discards all changes from the other branch (`openshift/master`) + and creates a merge commit. This leaves the content of your branch unchanged, + and when you next merge with the other branch, Git will only consider changes made + from this point forward. (Do not confuse this with the `ours` conflict resolution + strategy of the `recursive` merge strategy, the `-X` option.) + +``` +git merge -s ours openshift/master +``` + +## Creating a spreadsheet of carry commits from the previous release + +Given the upstream tag (e.g. `v1.24.2`) of the most recent rebase and the name +of the branch that is targeted for rebase (e.g. `openshift/master`), generate a tsv file +containing the set of carry commits that need to be considered for picking: + +``` +echo -e 'Comment\tSha\tAction\tClean\tSummary\tCommit link\tPR link' > ~/Documents/v1.24.2.tsv +``` +``` +git log $( git merge-base openshift/master v1.24.2 )..openshift/master --ancestry-path --reverse --no-merges --pretty='tformat:%x09%h%x09%x09%x09%s%x09https://github.com/openshift/kubernetes/commit/%h?w=1' | grep -E $'\t''UPSTREAM: .*'$'\t' | sed -E 's~UPSTREAM: ([0-9]+)(:.*)~UPSTREAM: \1\2\thttps://github.com/kubernetes/kubernetes/pull/\1~' >> ~/Documents/v1.24.2.tsv +``` + +This tsv file can be imported into a Google Sheets spreadsheet to track the +progress of picking commits to the new rebase branch. The spreadsheet can also +be a way of communicating with rebase reviewers. For an example of this +communication, please see [the spreadsheet used for the 1.24 +rebase](https://docs.google.com/spreadsheets/d/10KYptJkDB1z8_RYCQVBYDjdTlRfyoXILMa0Fg8tnNlY/edit). + +## Picking commits from the previous rebase branch to the new branch + +Go through the spreadsheet and for every commit set one of the appropriate actions: + - `p`, to pick the commit + - `s`, to squash it (add a comment with the sha of the target) + - `d`, to drop the commit (if it is not obvious, comment why) + +Set up conditional formatting in the Google Sheet to color these lines appropriately. + +Commits carried on rebase branches have commit messages prefixed as follows: + +- `UPSTREAM: <carry>:` + - A persistent carry that should probably be picked for the subsequent rebase branch. + - In general, these commits are used to modify behavior for consistency or + compatibility with OpenShift. +- `UPSTREAM: <drop>:` + - A carry that should probably not be picked for the subsequent rebase branch. + - In general, these commits are used to maintain the codebase in ways that are + branch-specific, like the update of generated files or dependencies. +- `UPSTREAM: 77870:` + - The number identifies a PR in upstream kubernetes + (i.e.
`https://github.com/kubernetes/kubernetes/pull/<pr number>`) + - A commit with this message should only be picked into the subsequent rebase branch + if the commits of the referenced PR are not included in the upstream branch. + - To check if a given commit is included in the upstream branch, open the referenced + upstream PR and check any of its commits for the release tag (e.g. `v1.25.0`) + targeted by the new rebase branch. For example: + - `<link to a commit in the upstream PR>` + +With these guidelines in mind, pick the appropriate commits from the previous rebase +branch into the new rebase branch. Create a new filter view in the spreadsheet to allow +you to get a view where `Action==p || Action==s`, and copy and paste the SHAs into the `git cherry-pick` +command. Use `tr '\n' ' ' <<< "<list of shas>"` to get a space-separated list +from the copy&paste. + +Where it makes sense to do so, squash carried changes that are tightly coupled to +simplify future rebases. If the commit message of a carry does not conform to +expectations, feel free to revise and note the change in the spreadsheet row for the +commit. + +If you pick all the pick and squash commits first and push them for review, it is easier for you +and your reviewers to check the code changes; squash them at the end. + +When filling in the Clean column in the spreadsheet, make sure to use the following +numbers to express the complexity of the pick: +- 0 - clean +- 1 - format fixups +- 2 - code fixups +- 3 - logic changes + +Explicit commit rules: +- Anything touching `openshift-hack/`, OpenShift-specific READMEs, or similar files + should be squashed to 1 commit named "UPSTREAM: <carry>: Add OpenShift specific files" +- Updating generated files coming from kubernetes should be a `<drop>` commit +- Generated changes should never be mixed with non-generated changes. If a carry is + ever seen to contain generated changes, those changes should be dropped. + +## Update the hyperkube image version to the release tag + +The [hyperkube image](openshift-hack/images/hyperkube/Dockerfile.rhel) +hard-codes the Kubernetes version in an image label. It's necessary to manually +set this label to the new release tag. Prefix the commit summary with +`UPSTREAM: <carry>: (squash)` and squash it before merging the rebase PR. + +This value, among other things, is used by ART to inject the appropriate version of +Kubernetes during the build process, so it always has to reflect the correct level of +Kubernetes. + +## Update base-os and test images + +To be able to use the latest kubelet from a pull request, the openshift/release +job layers the built RPM [on top of the `rhel-coreos` image](https://github.com/openshift/release/blob/78568fbde1ee9a15bc6ab08c7c49ae3539d3e302/ci-operator/config/openshift/kubernetes/openshift-kubernetes-master.yaml#L102-L113). +Make sure that the `FROM` uses the appropriate OCP version, which corresponds +to what we have in the [hyperkube image](openshift-hack/images/hyperkube/Dockerfile.rhel). + +Similarly, update the `FROM` in the [test image](openshift-hack/images/tests/Dockerfile.rhel) +to match the one from the [hyperkube image](openshift-hack/images/hyperkube/Dockerfile.rhel).
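+ +Since the test image must track the hyperkube image, a quick sanity check is to compare the `FROM` lines of the two Dockerfiles side by side. A minimal sketch (nothing more than `grep` over the files referenced above): + +``` +# Print the FROM lines of both images; the OCP and golang versions should match. +grep -H '^FROM' openshift-hack/images/hyperkube/Dockerfile.rhel openshift-hack/images/tests/Dockerfile.rhel +```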
+ +## Updating dependencies + +Once the commits are all picked from the previous rebase branch, and your PR +is mostly ready, each of the following repositories needs to be updated to depend +on the upstream tag targeted by the rebase: + +- https://github.com/openshift/api +- https://github.com/openshift/apiserver-library-go +- https://github.com/openshift/client-go +- https://github.com/openshift/library-go + +Often these repositories are updated in parallel by other team members, so make +sure to ask around before starting the work of bumping their dependencies. + +Once the above repos have been updated to depend on the target release, +it will be necessary to update `go.mod` to point to the appropriate revision +of these repos by running `hack/pin-dependency.sh` for each of them and then running +`hack/update-vendor.sh` (as per the [upstream documentation](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/vendor.md#adding-or-updating-a-dependency)). + +Make sure to commit the result of a vendoring update with `UPSTREAM: <drop>: bump(*)`. +If you have already bumped the dependencies to get the repo to compile, +don't forget to squash the commits before merging the PR. + +### Updating dependencies for pending bumps + +The upstream `hack/pin-dependency.sh` script only supports pinning a dependency +to its original repository. To pin to a fork branch that has not yet been +merged (i.e. to test a rebase ahead of shared library bumps having merged), the +following `go mod` invocations are suggested: + +``` +go mod edit -replace github.com/openshift/<library>=github.com/<user>/<library>@SHA +go mod tidy && go mod vendor +``` + +Alternatively, you can edit the `go.mod` file manually with your favourite editor and use search&replace. + +## Review test annotation rules + +The names of upstream e2e tests are annotated according to a set of +[declarative rules](openshift-hack/e2e/annotate/rules.go). These annotations +are used to group tests into suites and to skip tests that are known to be +incompatible with some or all configurations of OpenShift. + +When performing a rebase, it is important to review the rules to +ensure they are still relevant: + +- [ ] Ensure that `[Disabled:Alpha]` rules are appropriate for the current kube + level. Alpha features that are not enabled by default should be targeted + by this annotation to ensure that tests of those features are skipped. +- [ ] Add new skips (along with a BZ to track resolution) where e2e tests fail + consistently. + +Test failures representing major issues affecting cluster capability will +generally need to be addressed before merge of the rebase PR, but minor issues +(e.g. tests that fail to execute correctly but don't appear to reflect a +regression in behavior) can often be skipped and addressed post-merge. + +## Updating generated files + +- Update generated files by running `make update` + - This step depends on etcd being installed in the path, which can be + accomplished by running `hack/install-etcd.sh`. + - Alternatively, run it in the same container CI is using for the build_root, which already has + etcd at the correct version: +``` +podman run -it --rm -v $( pwd ):/go/k8s.io/kubernetes:Z --workdir=/go/k8s.io/kubernetes registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.20-openshift-4.15 make update OS_RUN_WITHOUT_DOCKER=yes FORCE_HOST_GO=1 +``` +- Commit the resulting changes as `UPSTREAM: <drop>: make update`.
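+ +If you prefer to run `make update` directly on the host rather than in the container, the following sketch shows one way to satisfy the etcd requirement (assuming `hack/install-etcd.sh` installs into `third_party/etcd`, its usual location): + +``` +# Install etcd locally and put it on the PATH before regenerating files. +hack/install-etcd.sh +export PATH="$(pwd)/third_party/etcd:${PATH}" +make update +```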
+ +## Building and testing + +- Build the code with `make` +- Test the code with `make test` + - Where test failures are encountered and can't be trivially resolved, the + spreadsheet can be used to track those failures to their resolution. The + example spreadsheet should have a sheet that demonstrates this tracking. + - Where a test failure proves challenging to fix without specialized knowledge, + make sure to coordinate with the team(s) responsible for the area(s) + exhibiting the failure. If in doubt, ask for help! +- Verify the code with `make verify` + +## Reacting to new commits + +Inevitably, a rebase will take long enough that new commits will end up being +merged to the targeted openshift/kubernetes branch after the rebase is +underway. The following strategy is suggested to minimize the cost of incorporating +these new commits: + +- rename the existing rebase branch (e.g. 1.25.0-beta.2 -> 1.25.0-beta.2-old) +- create a new rebase branch from HEAD of master +- merge the target upstream tag (e.g. 1.25.0-beta.2) with strategy ours +- pick all carries from the renamed rebase branch (e.g. 1.25.0-beta.2-old) +- pick new carries from the openshift/kubernetes target branch +- add details of the new carries to the spreadsheet +- update generated files + +With good tooling, the cost of this procedure should be ~10 minutes at +most. Re-picking carries should not result in conflicts since the base of the +rebase branch will be the same as before. The only potential sources of conflict +will be the newly added commits. + +## Ensuring the stability of the release + +To ensure we don't regress the product by introducing a new level of Kubernetes, +it is required to create a new sheet in the following spreadsheet and pass all +the variants: https://docs.google.com/spreadsheets/d/1PBk3eqYaPbvY982k_a0W7EGx7CBCHTmKrN6FyNSTDeA/edit#gid=0 + +NOTE: Double-check with the TRT team that the current variants in that spreadsheet +are up-to-date. + +## Send email announcing upcoming merge + +A second email should be sent close (~3 days) to merging the bump: + +``` +Title: k8s bump landing... + +<PR link> is bumping k8s to version <version>. +The following repositories have already been bumped as well: + +<list of bumped repositories> + +Followup work has been assigned to the appropriate teams +through bugzillas linked in the code. Please treat +them as the highest priority after landing the bump. + +Finally, this means we are blocking ALL PRs to our +kubernetes fork. +``` + +After sending the email, block the merge queue (see below). + +## Blocking the merge queue + +Close to merging a rebase, it is good practice to block any merges to the openshift/kubernetes +fork. To do that, follow these steps: + +1. Open a new issue in openshift/kubernetes +2. Use `Master Branch Frozen For Kubernetes Merging | branch:master` as the issue title +3. Add the `tide/merge-blocker` label to the issue (you might need a group lead for this) +4. All PRs (including the rebase) are now forbidden to merge to the master branch +5. Before landing the rebase PR, close this issue + +## Send email announcing work done + +The last email should be sent after merging the bump, as a +reply to the previous one: + +``` +<PR link> just merged. +It'll take some time to get the newer kubelet, but in the meantime we'll +continue to monitor CI. I encourage everyone to hold off from +merging any major changes to our kubernetes fork to provide clear CI +signal for the next 2-3 days. + +The following bugs were opened during the process, please treat +them as the highest priority and release blockers for your team: + +<list of bugs> +``` + +## Followup work + +1.
Update cluster-kube-apiserver-operator `pre-release-lifecycle` alert's +`removed_release` version similarly to https://github.com/openshift/cluster-kube-apiserver-operator/pull/1382. + +## Updating with `git merge` + +*This is the preferred way to update to patch releases of Kubernetes* + +After the initial bump described above, it is possible to update +to a newer released version using `git merge`. To do that, follow these steps: + + +1. Fetch the latest upstream changes: +``` +git fetch upstream +``` + where `upstream` points at https://github.com/kubernetes/kubernetes/, and check + the incoming changes: +``` +git log v1.25.0..v1.25.2 --ancestry-path --reverse --no-merges +``` +2. (optional) Revert any commits that were merged into kubernetes between the previous + update and the current one. + +3. Fetch the latest state of the openshift fork, check out the appropriate branch, and + create a new branch for the bump: +``` +git fetch openshift +git checkout openshift/release-4.12 +git checkout -b bump-1.25.2 +``` + where `openshift` points at https://github.com/openshift/kubernetes/. + +4. Merge the changes from the appropriate [released version](https://kubernetes.io/releases/patch-releases/#detailed-release-history-for-active-branches): +``` +git merge v1.25.2 +``` + Most likely you'll encounter conflicts; most are around the go.sum and go.mod + files, coming from newer versions. At this point, leave the conflicts + as they are and continue the merge: +``` +git add --all +git merge --continue +``` + This should create a commit titled `Merge tag 'v1.25.2' into bump-1.25.2`. + +5. Now return to the list of conflicts from the previous step and fix all the files, + picking the appropriate changes, in most cases the newer version. + When done, commit all of them as another commit: +``` +git add --all +git commit -m "UPSTREAM: <drop>: manually resolve conflicts" +``` + This ensures the person reviewing the bump can easily review all the conflicts + and their resolution. + +6. (optional) Update openshift dependencies and run `go mod tidy` to have the + branch names resolved to the proper go mod versions. Remember to use the released + versions matching the branch you're modifying. + This is usually required ONLY if you know there have been changes in one of + the libraries that need to be applied to our fork, which happens rarely. + Usually this is done by the team introducing the changes in the libraries. + +7. Run `/bin/bash` in a container using the command and image described in the [Updating generated files](#updating-generated-files) + section: +``` +podman run -it --rm -v $( pwd ):/go/k8s.io/kubernetes:Z --workdir=/go/k8s.io/kubernetes registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.20-openshift-4.15 /bin/bash +``` + In the container run: +``` +export OS_RUN_WITHOUT_DOCKER=yes +export FORCE_HOST_GO=1 +hack/update-vendor.sh +make update +``` + +NOTE: Make sure to use the correct version of the image (both openshift and golang +versions must be appropriate); as a reference, check the `openshift-hack/images/hyperkube/Dockerfile.rhel` +file. + +NOTE: You might encounter problems when running the above; make sure to check the [Potential problems](#potential-problems) +section below. + + +8. Update the Kubernetes version in `openshift-hack/images/hyperkube/Dockerfile.rhel` + and commit all of that as: +``` +git commit -m "UPSTREAM: <drop>: hack/update-vendor.sh, make update and update image" +``` + +9. Congratulations, you can open a PR with the updated k8s patch version!
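+ +For quick reference, the happy path above condenses to the following sketch (illustrative only; it assumes the `upstream` and `openshift` remotes set up earlier and skips the optional steps): + +``` +git fetch upstream && git fetch openshift +git checkout -b bump-1.25.2 openshift/release-4.12 +git merge v1.25.2 # leave conflicts as they are for now +git add --all && git merge --continue +# fix the conflicted files properly, then: +git add --all +git commit -m "UPSTREAM: <drop>: manually resolve conflicts" +# run hack/update-vendor.sh and make update in the CI image (step 7), then commit as in step 8 +```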
+ +### Potential problems + +While running `make update` in step 7 above, you might encounter one of the following problems: + +``` +go: inconsistent vendoring in /go/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/code-generator: +``` +To solve it, edit `staging/src/k8s.io/code-generator/go.mod`, removing this line: `k8s.io/code-generator => ../code-generator`. +Try re-running `make update`; if the problem re-appears, change directory to `staging/src/k8s.io/code-generator` +and run `go mod tidy` and `go mod vendor`. + +NOTE: Make sure to bring back this line: `k8s.io/code-generator => ../code-generator` in `staging/src/k8s.io/code-generator/go.mod` +after you've run `make update`, otherwise the `verify` step will fail during submission. + +``` +etcd version 3.5.6 or greater required +``` +Grab a newer version of etcd from https://github.com/etcd-io/etcd/releases/ and place +it at `/usr/local/bin/etcd`. + +## Updating with `rebase.sh` (experimental) + +The above steps are available as a script that will merge and rebase along the happy path, without automatic conflict +resolution, and at the end will create a PR for you. + +Here are the steps: +1. Create a new Bugzilla with the respective OpenShift version to rebase (Target Release stays `---`), + Priority & Severity set to High, and a proper description of the change logs. + See [BZ2021468](https://bugzilla.redhat.com/show_bug.cgi?id=2021468) as an example. +2. It's best to start off with a fresh fork of [openshift/kubernetes](https://github.com/openshift/kubernetes/). Stay on the master branch. +3. This script requires `jq`, `git`, `podman`, and `bash`; `gh` is optional. +4. In the root dir of that fork run: +``` +openshift-hack/rebase.sh --k8s-tag=v1.25.2 --openshift-release=release-4.12 --bugzilla-id=2003027 +``` + +where `k8s-tag` is the [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes/) release tag, `openshift-release` +is the OpenShift release branch in [openshift/kubernetes](https://github.com/openshift/kubernetes/), and `bugzilla-id` is the +Bugzilla ID created in step 1. + +5. In case of conflicts, the script will ask you to step into another shell to resolve them; it will then continue by committing the resolution with `UPSTREAM: `. +6. At the end, there will be a `rebase-$VERSION` branch pushed to your fork. +7. If you have `gh` installed and are logged in, the script will attempt to create a PR for you by opening a web browser. diff --git a/build/pause/Dockerfile.Rhel b/build/pause/Dockerfile.Rhel new file mode 100644 index 0000000000000..5dc852525b06d --- /dev/null +++ b/build/pause/Dockerfile.Rhel @@ -0,0 +1,12 @@ +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder +WORKDIR /go/src/github.com/openshift/kubernetes/build/pause +COPY . . +RUN mkdir -p bin && \ + gcc -Os -Wall -Werror -o bin/pause ./linux/pause.c + +FROM registry.ci.openshift.org/ocp/4.19:base-rhel9 +COPY --from=builder /go/src/github.com/openshift/kubernetes/build/pause/bin/pause /usr/bin/pod +LABEL io.k8s.display-name="OpenShift Pod" \ + io.k8s.description="This is a component of OpenShift and contains the binary that holds the pod namespaces." \ + io.openshift.tags="openshift" +ENTRYPOINT [ "/usr/bin/pod" ] diff --git a/build/run.sh b/build/run.sh index 3ecc2dacb7789..c0eb0b83270b0 100755 --- a/build/run.sh +++ b/build/run.sh @@ -25,6 +25,12 @@ set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. source "$KUBE_ROOT/build/common.sh" +# Allow running without docker (e.g.
in openshift ci) +if [[ "${OS_RUN_WITHOUT_DOCKER:-}" ]]; then + "${@}" + exit 0 +fi + KUBE_RUN_COPY_OUTPUT="${KUBE_RUN_COPY_OUTPUT:-y}" kube::build::verify_prereqs diff --git a/cmd/kube-apiserver/.import-restrictions b/cmd/kube-apiserver/.import-restrictions index 32b74c8868fc2..38c8633e00b7a 100644 --- a/cmd/kube-apiserver/.import-restrictions +++ b/cmd/kube-apiserver/.import-restrictions @@ -2,6 +2,7 @@ rules: - selectorRegexp: k8s[.]io/kubernetes allowedPrefixes: - k8s.io/kubernetes/cmd/kube-apiserver + - k8s.io/kubernetes/openshift-kube-apiserver - k8s.io/kubernetes/pkg - k8s.io/kubernetes/plugin - k8s.io/kubernetes/test/utils diff --git a/cmd/kube-apiserver/app/options/options.go b/cmd/kube-apiserver/app/options/options.go index ebed12af1d692..17c940a734dcb 100644 --- a/cmd/kube-apiserver/app/options/options.go +++ b/cmd/kube-apiserver/app/options/options.go @@ -61,6 +61,8 @@ type Extra struct { EndpointReconcilerType string MasterCount int + + OpenShiftConfig string } // NewServerRunOptions creates and returns ServerRunOptions according to the given featureGate and effectiveVersion of the server binary to run. @@ -156,5 +158,9 @@ func (s *ServerRunOptions) Flags() (fss cliflag.NamedFlagSets) { "The number of apiservers running in the cluster, must be a positive number. (In use when --endpoint-reconciler-type=master-count is enabled.)") fs.MarkDeprecated("apiserver-count", "apiserver-count is deprecated and will be removed in a future version.") + fs.StringVar(&s.OpenShiftConfig, "openshift-config", s.OpenShiftConfig, "config for openshift") + fs.MarkDeprecated("openshift-config", "to be removed") + fs.MarkHidden("openshift-config") + return fss } diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 8aa05a4d8f891..ae2259c26c38e 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -25,6 +25,10 @@ import ( "net/url" "os" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/admissionenablement" + "k8s.io/kubernetes/openshift-kube-apiserver/enablement" + "k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver" + "github.com/spf13/cobra" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -67,8 +71,7 @@ func NewAPIServerCommand() *cobra.Command { _, featureGate := featuregate.DefaultComponentGlobalsRegistry.ComponentGlobalsOrRegister( featuregate.DefaultKubeComponent, utilversion.DefaultBuildEffectiveVersion(), utilfeature.DefaultMutableFeatureGate) s := options.NewServerRunOptions() - ctx := genericapiserver.SetupSignalContext() - + ctx := genericapiserver.SetupSignalContextNotExiting() cmd := &cobra.Command{ Use: "kube-apiserver", Long: `The Kubernetes API server validates and configures data @@ -97,6 +100,39 @@ cluster's shared state through which all other components interact.`, } cliflag.PrintFlags(fs) + if len(s.OpenShiftConfig) > 0 { + // if we are running openshift, we modify the admission chain defaults accordingly + admissionenablement.InstallOpenShiftAdmissionPlugins(s) + + openshiftConfig, err := enablement.GetOpenshiftConfig(s.OpenShiftConfig) + if err != nil { + klog.Fatal(err) + } + enablement.ForceOpenShift(openshiftConfig) + + args, err := openshiftkubeapiserver.ConfigToFlags(openshiftConfig) + if err != nil { + return err + } + + // hopefully this resets the flags? 
+ if err := cmd.ParseFlags(args); err != nil { + return err + } + // initialize feature gates again with the new flags + if err := featuregate.DefaultComponentGlobalsRegistry.Set(); err != nil { + return err + } + + // print merged flags (merged from OpenshiftConfig) + cliflag.PrintFlags(cmd.Flags()) + + enablement.ForceGlobalInitializationForOpenShift() + } else { + // print default flags + cliflag.PrintFlags(cmd.Flags()) + } + // set default options completedOptions, err := s.Complete(ctx) if err != nil { @@ -120,6 +156,7 @@ cluster's shared state through which all other components interact.`, return nil }, } + cmd.SetContext(ctx) fs := cmd.Flags() diff --git a/cmd/kube-controller-manager/app/apps.go b/cmd/kube-controller-manager/app/apps.go index 20f9523d050e6..b1f84a480de21 100644 --- a/cmd/kube-controller-manager/app/apps.go +++ b/cmd/kube-controller-manager/app/apps.go @@ -41,8 +41,11 @@ func newDaemonSetControllerDescriptor() *ControllerDescriptor { } } func startDaemonSetController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) { - dsc, err := daemon.NewDaemonSetsController( + dsc, err := daemon.NewNodeSelectorAwareDaemonSetsController( ctx, + controllerContext.OpenShiftContext.OpenShiftDefaultProjectNodeSelector, + controllerContext.OpenShiftContext.KubeDefaultProjectNodeSelector, + controllerContext.InformerFactory.Core().V1().Namespaces(), controllerContext.InformerFactory.Apps().V1().DaemonSets(), controllerContext.InformerFactory.Apps().V1().ControllerRevisions(), controllerContext.InformerFactory.Core().V1().Pods(), diff --git a/cmd/kube-controller-manager/app/certificates.go b/cmd/kube-controller-manager/app/certificates.go index 062454b243b27..cd2d0c98ba292 100644 --- a/cmd/kube-controller-manager/app/certificates.go +++ b/cmd/kube-controller-manager/app/certificates.go @@ -31,6 +31,7 @@ import ( "k8s.io/controller-manager/controller" "k8s.io/klog/v2" "k8s.io/kubernetes/cmd/kube-controller-manager/names" + "k8s.io/kubernetes/openshift-kube-controller-manager/servicecacertpublisher" "k8s.io/kubernetes/pkg/controller/certificates/approver" "k8s.io/kubernetes/pkg/controller/certificates/cleaner" ctbpublisher "k8s.io/kubernetes/pkg/controller/certificates/clustertrustbundlepublisher" @@ -298,3 +299,24 @@ func getKubeAPIServerCAFileContents(controllerContext ControllerContext) ([]byte return rootCA, nil } + +func newServiceCACertPublisher() *ControllerDescriptor { + return &ControllerDescriptor{ + name: names.ServiceCACertificatePublisherController, + aliases: []string{"service-ca-cert-publisher"}, + initFunc: startServiceCACertPublisher, + } +} + +func startServiceCACertPublisher(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) { + sac, err := servicecacertpublisher.NewPublisher( + controllerContext.InformerFactory.Core().V1().ConfigMaps(), + controllerContext.InformerFactory.Core().V1().Namespaces(), + controllerContext.ClientBuilder.ClientOrDie("service-ca-cert-publisher"), + ) + if err != nil { + return nil, true, fmt.Errorf("error creating service CA certificate publisher: %v", err) + } + go sac.Run(1, ctx.Done()) + return nil, true, nil +} diff --git a/cmd/kube-controller-manager/app/config/config.go b/cmd/kube-controller-manager/app/config/config.go index 3034f288d91eb..efadc56383b14 100644 --- a/cmd/kube-controller-manager/app/config/config.go +++ b/cmd/kube-controller-manager/app/config/config.go @@ -26,6 +26,8 @@ import ( // Config 
is the main context object for the controller manager. type Config struct { + OpenShiftContext OpenShiftContext + ComponentConfig kubectrlmgrconfig.KubeControllerManagerConfiguration SecureServing *apiserver.SecureServingInfo diff --git a/cmd/kube-controller-manager/app/config/patch.go b/cmd/kube-controller-manager/app/config/patch.go new file mode 100644 index 0000000000000..3f79b64304c25 --- /dev/null +++ b/cmd/kube-controller-manager/app/config/patch.go @@ -0,0 +1,19 @@ +package config + +import ( + "k8s.io/client-go/transport" + + "github.com/openshift/library-go/pkg/monitor/health" +) + +// OpenShiftContext is additional context that we need to launch the kube-controller-manager for openshift. +// Basically, this holds our additional config information. +type OpenShiftContext struct { + OpenShiftConfig string + OpenShiftDefaultProjectNodeSelector string + KubeDefaultProjectNodeSelector string + UnsupportedKubeAPIOverPreferredHost bool + PreferredHostRoundTripperWrapperFn transport.WrapperFunc + PreferredHostHealthMonitor *health.Prober + CustomRoundTrippers []transport.WrapperFunc +} diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index fc3f899d3a7ba..4b18e74aa1326 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -38,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/mux" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -79,6 +80,8 @@ import ( serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount" kubefeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/serviceaccount" + + libgorestclient "github.com/openshift/library-go/pkg/config/client" ) func init() { @@ -132,15 +135,27 @@ controller, and serviceaccounts controller.`, } cliflag.PrintFlags(cmd.Flags()) + if err := SetUpCustomRoundTrippersForOpenShift(s); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } + c, err := s.Config(KnownControllers(), ControllersDisabledByDefault(), ControllerAliases()) if err != nil { return err } + if err := ShimForOpenShift(s, c); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return err + } + // add feature enablement metrics fg := s.ComponentGlobalsRegistry.FeatureGateFor(featuregate.DefaultKubeComponent) fg.(featuregate.MutableFeatureGate).AddMetrics() - return Run(context.Background(), c.Complete()) + + stopCh := server.SetupSignalHandler() + return Run(context.Background(), c.Complete(), stopCh) }, Args: func(cmd *cobra.Command, args []string) error { for _, arg := range args { @@ -177,9 +192,9 @@ func ResyncPeriod(c *config.CompletedConfig) func() time.Duration { } // Run runs the KubeControllerManagerOptions. 
-func Run(ctx context.Context, c *config.CompletedConfig) error { +func Run(ctx context.Context, c *config.CompletedConfig, stopCh2 <-chan struct{}) error { logger := klog.FromContext(ctx) - stopCh := ctx.Done() + stopCh := mergeCh(ctx.Done(), stopCh2) // To help debugging, immediately log version logger.Info("Starting", "version", utilversion.Get()) @@ -197,6 +212,17 @@ func Run(ctx context.Context, c *config.CompletedConfig) error { logger.Error(err, "Unable to register configz") } + // start the localhost health monitor early so that it can be used by the LE client + if c.OpenShiftContext.PreferredHostHealthMonitor != nil { + hmCtx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + <-stopCh + cancel() + }() + go c.OpenShiftContext.PreferredHostHealthMonitor.Run(hmCtx) + } + // Setup any healthz checks we will want to use. var checks []healthz.HealthChecker var electionChecker *leaderelection.HealthzAdaptor @@ -325,10 +351,18 @@ func Run(ctx context.Context, c *config.CompletedConfig) error { run(ctx, controllerDescriptors) }, OnStoppedLeading: func() { - logger.Error(nil, "leaderelection lost") - klog.FlushAndExit(klog.ExitFlushTimeout, 1) + select { + case <-stopCh: + // We were asked to terminate. Exit 0. + klog.Info("Requested to terminate. Exiting.") + os.Exit(0) + default: + // We lost the lock. + logger.Error(nil, "leaderelection lost") + klog.FlushAndExit(klog.ExitFlushTimeout, 1) + } }, - }) + }, stopCh) // If Leader Migration is enabled, proceed to attempt the migration lock. if leaderMigrator != nil { @@ -352,10 +386,18 @@ func Run(ctx context.Context, c *config.CompletedConfig) error { run(ctx, controllerDescriptors) }, OnStoppedLeading: func() { - logger.Error(nil, "migration leaderelection lost") - klog.FlushAndExit(klog.ExitFlushTimeout, 1) + select { + case <-stopCh: + // We were asked to terminate. Exit 0. + klog.Info("Requested to terminate. Exiting.") + os.Exit(0) + default: + // We lost the lock. 
+ logger.Error(nil, "migration leaderelection lost") + klog.FlushAndExit(klog.ExitFlushTimeout, 1) + } }, - }) + }, stopCh) } <-stopCh @@ -364,6 +406,8 @@ func Run(ctx context.Context, c *config.CompletedConfig) error { // ControllerContext defines the context object for controller type ControllerContext struct { + OpenShiftContext config.OpenShiftContext + // ClientBuilder will provide a client for this controller to use ClientBuilder clientbuilder.ControllerClientBuilder @@ -483,9 +527,7 @@ func ControllersDisabledByDefault() []string { controllersDisabledByDefault = append(controllersDisabledByDefault, name) } } - sort.Strings(controllersDisabledByDefault) - return controllersDisabledByDefault } @@ -571,6 +613,7 @@ func NewControllerDescriptors() map[string]*ControllerDescriptor { register(newTTLAfterFinishedControllerDescriptor()) register(newRootCACertificatePublisherControllerDescriptor()) register(newKubeAPIServerSignerClusterTrustBundledPublisherDescriptor()) + register(newServiceCACertPublisher()) register(newEphemeralVolumeControllerDescriptor()) // feature gated @@ -607,7 +650,12 @@ func CreateControllerContext(ctx context.Context, s *config.CompletedConfig, roo } versionedClient := rootClientBuilder.ClientOrDie("shared-informers") - sharedInformers := informers.NewSharedInformerFactoryWithOptions(versionedClient, ResyncPeriod(s)(), informers.WithTransform(trim)) + var sharedInformers informers.SharedInformerFactory + if InformerFactoryOverride == nil { + sharedInformers = informers.NewSharedInformerFactoryWithOptions(versionedClient, ResyncPeriod(s)(), informers.WithTransform(trim)) + } else { + sharedInformers = InformerFactoryOverride + } metadataClient := metadata.NewForConfigOrDie(rootClientBuilder.ConfigOrDie("metadata-informers")) metadataInformers := metadatainformer.NewSharedInformerFactoryWithOptions(metadataClient, ResyncPeriod(s)(), metadatainformer.WithTransform(trim)) @@ -627,6 +675,7 @@ func CreateControllerContext(ctx context.Context, s *config.CompletedConfig, roo }, 30*time.Second, ctx.Done()) controllerContext := ControllerContext{ + OpenShiftContext: s.OpenShiftContext, ClientBuilder: clientBuilder, InformerFactory: sharedInformers, ObjectOrMetadataInformerFactory: informerfactory.NewInformerFactory(sharedInformers, metadataInformers), @@ -808,10 +857,10 @@ func startServiceAccountTokenController(ctx context.Context, controllerContext C controllerContext.InformerFactory.Core().V1().ServiceAccounts(), controllerContext.InformerFactory.Core().V1().Secrets(), rootClientBuilder.ClientOrDie("tokens-controller"), - serviceaccountcontroller.TokensControllerOptions{ + applyOpenShiftServiceServingCertCA(serviceaccountcontroller.TokensControllerOptions{ TokenGenerator: tokenGenerator, RootCA: rootCA, - }, + }), ) if err != nil { return nil, true, fmt.Errorf("error creating Tokens controller: %v", err) @@ -845,7 +894,7 @@ func createClientBuilders(c *config.CompletedConfig) (clientBuilder clientbuilde if c.ComponentConfig.KubeCloudShared.UseServiceAccountCredentials { clientBuilder = clientbuilder.NewDynamicClientBuilder( - restclient.AnonymousClientConfig(c.Kubeconfig), + libgorestclient.AnonymousClientConfigWithWrapTransport(c.Kubeconfig), c.Client.CoreV1(), metav1.NamespaceSystem) } else { @@ -856,7 +905,7 @@ func createClientBuilders(c *config.CompletedConfig) (clientBuilder clientbuilde // leaderElectAndRun runs the leader election, and runs the callbacks once the leader lease is acquired. 
// TODO: extract this function into staging/controller-manager -func leaderElectAndRun(ctx context.Context, c *config.CompletedConfig, lockIdentity string, electionChecker *leaderelection.HealthzAdaptor, resourceLock string, leaseName string, callbacks leaderelection.LeaderCallbacks) { +func leaderElectAndRun(ctx context.Context, c *config.CompletedConfig, lockIdentity string, electionChecker *leaderelection.HealthzAdaptor, resourceLock string, leaseName string, callbacks leaderelection.LeaderCallbacks, stopCh <-chan struct{}) { logger := klog.FromContext(ctx) rl, err := resourcelock.NewFromKubeconfig(resourceLock, c.ComponentConfig.Generic.LeaderElection.ResourceNamespace, @@ -872,7 +921,13 @@ func leaderElectAndRun(ctx context.Context, c *config.CompletedConfig, lockIdent klog.FlushAndExit(klog.ExitFlushTimeout, 1) } - leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + leCtx, cancel := context.WithCancel(ctx) + defer cancel() + go func() { + <-stopCh + cancel() + }() + leaderelection.RunOrDie(leCtx, leaderelection.LeaderElectionConfig{ Lock: rl, LeaseDuration: c.ComponentConfig.Generic.LeaderElection.LeaseDuration.Duration, RenewDeadline: c.ComponentConfig.Generic.LeaderElection.RenewDeadline.Duration, diff --git a/cmd/kube-controller-manager/app/controllermanager_test.go b/cmd/kube-controller-manager/app/controllermanager_test.go index 88af44ed6d6fa..66bb3c5fcabfc 100644 --- a/cmd/kube-controller-manager/app/controllermanager_test.go +++ b/cmd/kube-controller-manager/app/controllermanager_test.go @@ -90,6 +90,7 @@ func TestControllerNamesDeclaration(t *testing.T) { names.TTLAfterFinishedController, names.RootCACertificatePublisherController, names.KubeAPIServerClusterTrustBundlePublisherController, + names.ServiceCACertificatePublisherController, names.EphemeralVolumeController, names.StorageVersionGarbageCollectorController, names.ResourceClaimController, diff --git a/cmd/kube-controller-manager/app/options/options.go b/cmd/kube-controller-manager/app/options/options.go index 7556d946922f6..a6067970b021c 100644 --- a/cmd/kube-controller-manager/app/options/options.go +++ b/cmd/kube-controller-manager/app/options/options.go @@ -56,6 +56,8 @@ import ( // add the kubernetes feature gates _ "k8s.io/kubernetes/pkg/features" + + libgorestclient "github.com/openshift/library-go/pkg/config/client" ) const ( @@ -107,6 +109,7 @@ type KubeControllerManagerOptions struct { // ComponentGlobalsRegistry is the registry where the effective versions and feature gates for all components are stored. ComponentGlobalsRegistry featuregate.ComponentGlobalsRegistry + OpenShiftContext kubecontrollerconfig.OpenShiftContext } // NewKubeControllerManagerOptions creates a new KubeControllerManagerOptions with a default config. 
@@ -301,6 +304,9 @@ func (s *KubeControllerManagerOptions) Flags(allControllers []string, disabledBy } s.ComponentGlobalsRegistry.AddFlags(fss.FlagSet("generic")) + fs.StringVar(&s.OpenShiftContext.OpenShiftConfig, "openshift-config", s.OpenShiftContext.OpenShiftConfig, "indicates that this process should be compatible with openshift start master") + fs.MarkHidden("openshift-config") + fs.BoolVar(&s.OpenShiftContext.UnsupportedKubeAPIOverPreferredHost, "unsupported-kube-api-over-localhost", false, "when set makes KCM prefer talking to localhost kube-apiserver (when available) instead of LB") return fss } @@ -408,6 +414,9 @@ func (s *KubeControllerManagerOptions) ApplyTo(c *kubecontrollerconfig.Config, a return err } } + + c.OpenShiftContext = s.OpenShiftContext + return nil } @@ -489,6 +498,14 @@ func (s KubeControllerManagerOptions) Config(allControllers []string, disabledBy kubeconfig.QPS = s.Generic.ClientConnection.QPS kubeconfig.Burst = int(s.Generic.ClientConnection.Burst) + if s.OpenShiftContext.PreferredHostRoundTripperWrapperFn != nil { + libgorestclient.DefaultServerName(kubeconfig) + kubeconfig.Wrap(s.OpenShiftContext.PreferredHostRoundTripperWrapperFn) + } + for _, customOpenShiftRoundTripper := range s.OpenShiftContext.CustomRoundTrippers { + kubeconfig.Wrap(customOpenShiftRoundTripper) + } + client, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, KubeControllerManagerUserAgent)) if err != nil { return nil, err diff --git a/cmd/kube-controller-manager/app/patch.go b/cmd/kube-controller-manager/app/patch.go new file mode 100644 index 0000000000000..9e4915b6d6156 --- /dev/null +++ b/cmd/kube-controller-manager/app/patch.go @@ -0,0 +1,180 @@ +package app + +import ( + "fmt" + "io/ioutil" + "net/http" + "path" + "strings" + "time" + + "k8s.io/apimachinery/pkg/util/json" + kyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/informers" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/transport" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/kubernetes/cmd/kube-controller-manager/app/config" + "k8s.io/kubernetes/cmd/kube-controller-manager/app/options" + + libgorestclient "github.com/openshift/library-go/pkg/config/client" + "github.com/openshift/library-go/pkg/monitor/health" +) + +var InformerFactoryOverride informers.SharedInformerFactory + +func SetUpCustomRoundTrippersForOpenShift(controllerManagerOptions *options.KubeControllerManagerOptions) error { + controllerManagerOptions.OpenShiftContext.CustomRoundTrippers = []transport.WrapperFunc{newRejectIfNotReadyHeaderRoundTripper([]string{"generic-garbage-collector", "namespace-controller"})} + + if !controllerManagerOptions.OpenShiftContext.UnsupportedKubeAPIOverPreferredHost { + return nil + } + + config, err := clientcmd.BuildConfigFromFlags(controllerManagerOptions.Master, controllerManagerOptions.Generic.ClientConnection.Kubeconfig) + if err != nil { + return err + } + libgorestclient.DefaultServerName(config) + + targetProvider := health.StaticTargetProvider{"localhost:6443"} + controllerManagerOptions.OpenShiftContext.PreferredHostHealthMonitor, err = health.New(targetProvider, createRestConfigForHealthMonitor(config)) + if err != nil { + return err + } + controllerManagerOptions.OpenShiftContext.PreferredHostHealthMonitor. + WithHealthyProbesThreshold(3). + WithUnHealthyProbesThreshold(5). + WithProbeInterval(5 * time.Second). + WithProbeResponseTimeout(2 * time.Second). 
+ WithMetrics(health.Register(legacyregistry.MustRegister)) + + controllerManagerOptions.OpenShiftContext.PreferredHostRoundTripperWrapperFn = libgorestclient.NewPreferredHostRoundTripper(func() string { + healthyTargets, _ := controllerManagerOptions.OpenShiftContext.PreferredHostHealthMonitor.Targets() + if len(healthyTargets) == 1 { + return healthyTargets[0] + } + return "" + }) + + controllerManagerOptions.Authentication.WithCustomRoundTripper(controllerManagerOptions.OpenShiftContext.PreferredHostRoundTripperWrapperFn) + controllerManagerOptions.Authorization.WithCustomRoundTripper(controllerManagerOptions.OpenShiftContext.PreferredHostRoundTripperWrapperFn) + + return nil +} + +func ShimForOpenShift(controllerManagerOptions *options.KubeControllerManagerOptions, controllerManager *config.Config) error { + if len(controllerManager.OpenShiftContext.OpenShiftConfig) == 0 { + return nil + } + + // TODO this gets removed when no longer take flags and no longer build a recycler template + openshiftConfig, err := getOpenShiftConfig(controllerManager.OpenShiftContext.OpenShiftConfig) + if err != nil { + return err + } + + // TODO this should be replaced by using a flex volume to inject service serving cert CAs into pods instead of adding it to the sa token + if err := applyOpenShiftServiceServingCertCAFunc(path.Dir(controllerManager.OpenShiftContext.OpenShiftConfig), openshiftConfig); err != nil { + return err + } + + // skip GC on some openshift resources + // TODO this should be replaced by discovery information in some way + if err := applyOpenShiftGCConfig(controllerManager); err != nil { + return err + } + + if err := applyOpenShiftConfigDefaultProjectSelector(controllerManagerOptions, openshiftConfig); err != nil { + return err + } + + // Overwrite the informers, because we have our custom generic informers for quota. + // TODO update quota to create its own informer like garbage collection + if informers, err := newInformerFactory(controllerManager.Kubeconfig); err != nil { + return err + } else { + InformerFactoryOverride = informers + } + + return nil +} + +func getOpenShiftConfig(configFile string) (map[string]interface{}, error) { + configBytes, err := ioutil.ReadFile(configFile) + if err != nil { + return nil, err + } + jsonBytes, err := kyaml.ToJSON(configBytes) + if err != nil { + return nil, err + } + config := map[string]interface{}{} + if err := json.Unmarshal(jsonBytes, &config); err != nil { + return nil, err + } + + return config, nil +} + +func applyOpenShiftConfigDefaultProjectSelector(controllerManagerOptions *options.KubeControllerManagerOptions, openshiftConfig map[string]interface{}) error { + projectConfig, ok := openshiftConfig["projectConfig"] + if !ok { + return nil + } + + castProjectConfig := projectConfig.(map[string]interface{}) + defaultNodeSelector, ok := castProjectConfig["defaultNodeSelector"] + if !ok { + return nil + } + controllerManagerOptions.OpenShiftContext.OpenShiftDefaultProjectNodeSelector = defaultNodeSelector.(string) + + return nil +} + +func createRestConfigForHealthMonitor(restConfig *rest.Config) *rest.Config { + restConfigCopy := *restConfig + rest.AddUserAgent(&restConfigCopy, fmt.Sprintf("%s-health-monitor", options.KubeControllerManagerUserAgent)) + + return &restConfigCopy +} + +// newRejectIfNotReadyHeaderRoundTripper returns a middleware for setting X-OpenShift-Internal-If-Not-Ready HTTP Header for the given users. +// In general, setting the header will result in getting 429 when the server hasn't been ready.
+// This prevents certain controllers like GC, Namespace from accidentally removing resources when the caches haven't been fully synchronized. +func newRejectIfNotReadyHeaderRoundTripper(eligibleUsers []string) func(http.RoundTripper) http.RoundTripper { + return func(rt http.RoundTripper) http.RoundTripper { + return &rejectIfNotReadyHeaderRT{baseRT: rt, eligibleUsers: eligibleUsers} + } +} + +type rejectIfNotReadyHeaderRT struct { + baseRT http.RoundTripper + eligibleUsers []string +} + +func (rt *rejectIfNotReadyHeaderRT) RoundTrip(r *http.Request) (*http.Response, error) { + currentUser := r.UserAgent() + for _, eligibleUser := range rt.eligibleUsers { + if strings.Contains(currentUser, eligibleUser) { + r.Header.Set("X-OpenShift-Internal-If-Not-Ready", "reject") + break + } + } + return rt.baseRT.RoundTrip(r) +} + +// mergeCh takes two stop channels and returns a single one that +// closes as soon as one of the inputs closes or receives data. +func mergeCh(stopCh1, stopCh2 <-chan struct{}) <-chan struct{} { + merged := make(chan struct{}) + go func() { + defer close(merged) + select { + case <-stopCh1: + case <-stopCh2: + } + }() + return merged +} diff --git a/cmd/kube-controller-manager/app/patch_gc.go b/cmd/kube-controller-manager/app/patch_gc.go new file mode 100644 index 0000000000000..53285c96f8688 --- /dev/null +++ b/cmd/kube-controller-manager/app/patch_gc.go @@ -0,0 +1,37 @@ +package app + +import ( + gcconfig "k8s.io/kubernetes/pkg/controller/garbagecollector/config" + + "k8s.io/kubernetes/cmd/kube-controller-manager/app/config" +) + +func applyOpenShiftGCConfig(controllerManager *config.Config) error { + // TODO make this configurable or discoverable. This is going to prevent us from running the stock GC controller + // IF YOU ADD ANYTHING TO THIS LIST, MAKE SURE THAT YOU UPDATE THEIR STRATEGIES TO PREVENT GC FINALIZERS + // + // DO NOT PUT CRDs into the list. apiextensions-apiserver does not implement GarbageCollectionPolicy + // so the deletion of these will be blocked because of foregroundDeletion finalizer when foreground deletion strategy is specified.
+ controllerManager.ComponentConfig.GarbageCollectorController.GCIgnoredResources = append(controllerManager.ComponentConfig.GarbageCollectorController.GCIgnoredResources, + // explicitly disabled from GC for now - not enough value to track them + gcconfig.GroupResource{Group: "oauth.openshift.io", Resource: "oauthclientauthorizations"}, + gcconfig.GroupResource{Group: "oauth.openshift.io", Resource: "oauthclients"}, + gcconfig.GroupResource{Group: "user.openshift.io", Resource: "groups"}, + gcconfig.GroupResource{Group: "user.openshift.io", Resource: "identities"}, + gcconfig.GroupResource{Group: "user.openshift.io", Resource: "users"}, + gcconfig.GroupResource{Group: "image.openshift.io", Resource: "images"}, + + // virtual resource + gcconfig.GroupResource{Group: "project.openshift.io", Resource: "projects"}, + // virtual and unwatchable resource, surfaced via rbac.authorization.k8s.io objects + gcconfig.GroupResource{Group: "authorization.openshift.io", Resource: "clusterroles"}, + gcconfig.GroupResource{Group: "authorization.openshift.io", Resource: "clusterrolebindings"}, + gcconfig.GroupResource{Group: "authorization.openshift.io", Resource: "roles"}, + gcconfig.GroupResource{Group: "authorization.openshift.io", Resource: "rolebindings"}, + // these resources contain security information in their names, and we don't need to track them + gcconfig.GroupResource{Group: "oauth.openshift.io", Resource: "oauthaccesstokens"}, + gcconfig.GroupResource{Group: "oauth.openshift.io", Resource: "oauthauthorizetokens"}, + ) + + return nil +} diff --git a/cmd/kube-controller-manager/app/patch_informers_openshift.go b/cmd/kube-controller-manager/app/patch_informers_openshift.go new file mode 100644 index 0000000000000..0c032dec30483 --- /dev/null +++ b/cmd/kube-controller-manager/app/patch_informers_openshift.go @@ -0,0 +1,293 @@ +package app + +import ( + "time" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + appclient "github.com/openshift/client-go/apps/clientset/versioned" + appinformer "github.com/openshift/client-go/apps/informers/externalversions" + authorizationclient "github.com/openshift/client-go/authorization/clientset/versioned" + authorizationinformer "github.com/openshift/client-go/authorization/informers/externalversions" + buildclient "github.com/openshift/client-go/build/clientset/versioned" + buildinformer "github.com/openshift/client-go/build/informers/externalversions" + imageclient "github.com/openshift/client-go/image/clientset/versioned" + imageinformer "github.com/openshift/client-go/image/informers/externalversions" + networkclient "github.com/openshift/client-go/network/clientset/versioned" + networkinformer "github.com/openshift/client-go/network/informers/externalversions" + oauthclient "github.com/openshift/client-go/oauth/clientset/versioned" + oauthinformer "github.com/openshift/client-go/oauth/informers/externalversions" + quotaclient "github.com/openshift/client-go/quota/clientset/versioned" + quotainformer "github.com/openshift/client-go/quota/informers/externalversions" + routeclient "github.com/openshift/client-go/route/clientset/versioned" + routeinformer "github.com/openshift/client-go/route/informers/externalversions" + securityclient "github.com/openshift/client-go/security/clientset/versioned" + securityinformer "github.com/openshift/client-go/security/informers/externalversions" + templateclient 
"github.com/openshift/client-go/template/clientset/versioned" + templateinformer "github.com/openshift/client-go/template/informers/externalversions" + userclient "github.com/openshift/client-go/user/clientset/versioned" + userinformer "github.com/openshift/client-go/user/informers/externalversions" +) + +type externalKubeInformersWithExtraGenerics struct { + informers.SharedInformerFactory + genericResourceInformer GenericResourceInformer +} + +func (i externalKubeInformersWithExtraGenerics) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return i.genericResourceInformer.ForResource(resource) +} + +func (i externalKubeInformersWithExtraGenerics) Start(stopCh <-chan struct{}) { + i.SharedInformerFactory.Start(stopCh) + i.genericResourceInformer.Start(stopCh) +} + +type GenericResourceInformer interface { + ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) + Start(stopCh <-chan struct{}) +} + +// genericResourceInformerFunc will handle a cast to a matching type +type genericResourceInformerFunc func(resource schema.GroupVersionResource) (informers.GenericInformer, error) + +func (fn genericResourceInformerFunc) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return fn(resource) +} + +// this is a temporary condition until we rewrite enough of generation to auto-conform to the required interface and no longer need the internal version shim +func (fn genericResourceInformerFunc) Start(stopCh <-chan struct{}) {} + +type genericInformers struct { + // this is a temporary condition until we rewrite enough of generation to auto-conform to the required interface and no longer need the internal version shim + startFn func(stopCh <-chan struct{}) + generic []GenericResourceInformer +} + +func newGenericInformers(startFn func(stopCh <-chan struct{}), informers ...GenericResourceInformer) genericInformers { + return genericInformers{ + startFn: startFn, + generic: informers, + } +} + +func (i genericInformers) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + var firstErr error + for _, generic := range i.generic { + informer, err := generic.ForResource(resource) + if err == nil { + return informer, nil + } + if firstErr == nil { + firstErr = err + } + } + klog.V(4).Infof("Couldn't find informer for %v", resource) + return nil, firstErr +} + +func (i genericInformers) Start(stopCh <-chan struct{}) { + i.startFn(stopCh) + for _, generic := range i.generic { + generic.Start(stopCh) + } +} + +// informers is a convenient way for us to keep track of the informers, but +// is intentionally private. We don't want to leak it out further than this package. +// Everything else should say what it wants. 
+type combinedInformers struct { + externalKubeInformers informers.SharedInformerFactory + appInformers appinformer.SharedInformerFactory + authorizationInformers authorizationinformer.SharedInformerFactory + buildInformers buildinformer.SharedInformerFactory + imageInformers imageinformer.SharedInformerFactory + networkInformers networkinformer.SharedInformerFactory + oauthInformers oauthinformer.SharedInformerFactory + quotaInformers quotainformer.SharedInformerFactory + routeInformers routeinformer.SharedInformerFactory + securityInformers securityinformer.SharedInformerFactory + templateInformers templateinformer.SharedInformerFactory + userInformers userinformer.SharedInformerFactory +} + +func newInformerFactory(clientConfig *rest.Config) (informers.SharedInformerFactory, error) { + kubeClient, err := kubernetes.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + appClient, err := appclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + authorizationClient, err := authorizationclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + buildClient, err := buildclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + imageClient, err := imageclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + networkClient, err := networkclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + oauthClient, err := oauthclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + quotaClient, err := quotaclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + routerClient, err := routeclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + securityClient, err := securityclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + templateClient, err := templateclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + userClient, err := userclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + + // TODO find a single place to create and start informers. During the 1.7 rebase this will come more naturally in a config object, + // before then we should try to eliminate our direct to storage access. It's making us do weird things. 
+ const defaultInformerResyncPeriod = 10 * time.Minute + + trim := func(obj interface{}) (interface{}, error) { + if accessor, err := meta.Accessor(obj); err == nil { + accessor.SetManagedFields(nil) + } + return obj, nil + } + + ci := &combinedInformers{ + externalKubeInformers: informers.NewSharedInformerFactoryWithOptions(kubeClient, defaultInformerResyncPeriod, informers.WithTransform(trim)), + appInformers: appinformer.NewSharedInformerFactoryWithOptions(appClient, defaultInformerResyncPeriod, appinformer.WithTransform(trim)), + authorizationInformers: authorizationinformer.NewSharedInformerFactoryWithOptions(authorizationClient, defaultInformerResyncPeriod, authorizationinformer.WithTransform(trim)), + buildInformers: buildinformer.NewSharedInformerFactoryWithOptions(buildClient, defaultInformerResyncPeriod, buildinformer.WithTransform(trim)), + imageInformers: imageinformer.NewSharedInformerFactoryWithOptions(imageClient, defaultInformerResyncPeriod, imageinformer.WithTransform(trim)), + networkInformers: networkinformer.NewSharedInformerFactoryWithOptions(networkClient, defaultInformerResyncPeriod, networkinformer.WithTransform(trim)), + oauthInformers: oauthinformer.NewSharedInformerFactoryWithOptions(oauthClient, defaultInformerResyncPeriod, oauthinformer.WithTransform(trim)), + quotaInformers: quotainformer.NewSharedInformerFactoryWithOptions(quotaClient, defaultInformerResyncPeriod, quotainformer.WithTransform(trim)), + routeInformers: routeinformer.NewSharedInformerFactoryWithOptions(routerClient, defaultInformerResyncPeriod, routeinformer.WithTransform(trim)), + securityInformers: securityinformer.NewSharedInformerFactoryWithOptions(securityClient, defaultInformerResyncPeriod, securityinformer.WithTransform(trim)), + templateInformers: templateinformer.NewSharedInformerFactoryWithOptions(templateClient, defaultInformerResyncPeriod, templateinformer.WithTransform(trim)), + userInformers: userinformer.NewSharedInformerFactoryWithOptions(userClient, defaultInformerResyncPeriod, userinformer.WithTransform(trim)), + } + + return externalKubeInformersWithExtraGenerics{ + SharedInformerFactory: ci.GetExternalKubeInformers(), + genericResourceInformer: ci.ToGenericInformer(), + }, nil +} + +func (i *combinedInformers) GetExternalKubeInformers() informers.SharedInformerFactory { + return i.externalKubeInformers +} +func (i *combinedInformers) GetAppInformers() appinformer.SharedInformerFactory { + return i.appInformers +} +func (i *combinedInformers) GetAuthorizationInformers() authorizationinformer.SharedInformerFactory { + return i.authorizationInformers +} +func (i *combinedInformers) GetBuildInformers() buildinformer.SharedInformerFactory { + return i.buildInformers +} +func (i *combinedInformers) GetImageInformers() imageinformer.SharedInformerFactory { + return i.imageInformers +} +func (i *combinedInformers) GetNetworkInformers() networkinformer.SharedInformerFactory { + return i.networkInformers +} +func (i *combinedInformers) GetOauthInformers() oauthinformer.SharedInformerFactory { + return i.oauthInformers +} +func (i *combinedInformers) GetQuotaInformers() quotainformer.SharedInformerFactory { + return i.quotaInformers +} +func (i *combinedInformers) GetRouteInformers() routeinformer.SharedInformerFactory { + return i.routeInformers +} +func (i *combinedInformers) GetSecurityInformers() securityinformer.SharedInformerFactory { + return i.securityInformers +} +func (i *combinedInformers) GetTemplateInformers() templateinformer.SharedInformerFactory { + return 
i.templateInformers +} +func (i *combinedInformers) GetUserInformers() userinformer.SharedInformerFactory { + return i.userInformers +} + +// Start initializes all requested informers. +func (i *combinedInformers) Start(stopCh <-chan struct{}) { + i.externalKubeInformers.Start(stopCh) + i.appInformers.Start(stopCh) + i.authorizationInformers.Start(stopCh) + i.buildInformers.Start(stopCh) + i.imageInformers.Start(stopCh) + i.networkInformers.Start(stopCh) + i.oauthInformers.Start(stopCh) + i.quotaInformers.Start(stopCh) + i.routeInformers.Start(stopCh) + i.securityInformers.Start(stopCh) + i.templateInformers.Start(stopCh) + i.userInformers.Start(stopCh) +} + +func (i *combinedInformers) ToGenericInformer() GenericResourceInformer { + return newGenericInformers( + i.Start, + i.GetExternalKubeInformers(), + genericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return i.GetAppInformers().ForResource(resource) + }), + genericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return i.GetAuthorizationInformers().ForResource(resource) + }), + genericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return i.GetBuildInformers().ForResource(resource) + }), + genericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return i.GetImageInformers().ForResource(resource) + }), + genericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return i.GetNetworkInformers().ForResource(resource) + }), + genericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return i.GetOauthInformers().ForResource(resource) + }), + genericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return i.GetQuotaInformers().ForResource(resource) + }), + genericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return i.GetRouteInformers().ForResource(resource) + }), + genericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return i.GetSecurityInformers().ForResource(resource) + }), + genericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return i.GetTemplateInformers().ForResource(resource) + }), + genericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return i.GetUserInformers().ForResource(resource) + }), + ) +} diff --git a/cmd/kube-controller-manager/app/patch_satoken.go b/cmd/kube-controller-manager/app/patch_satoken.go new file mode 100644 index 0000000000000..82f1bb9b3ff1a --- /dev/null +++ b/cmd/kube-controller-manager/app/patch_satoken.go @@ -0,0 +1,87 @@ +package app + +import ( + "fmt" + "io/ioutil" + "path/filepath" + + certutil "k8s.io/client-go/util/cert" + serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount" +) + +var applyOpenShiftServiceServingCertCA = func(in serviceaccountcontroller.TokensControllerOptions) serviceaccountcontroller.TokensControllerOptions { + return in +} + +func applyOpenShiftServiceServingCertCAFunc(openshiftConfigBase string, openshiftConfig map[string]interface{}) error { + serviceServingCertCAFilename := getServiceServingCertCAFilename(openshiftConfig) + if len(serviceServingCertCAFilename) == 0 
{ + return nil + } + + resolvePath(&serviceServingCertCAFilename, openshiftConfigBase) + + serviceServingCA, err := ioutil.ReadFile(serviceServingCertCAFilename) + if err != nil { + return fmt.Errorf("error reading ca file for Service Serving Certificate Signer: %s: %v", serviceServingCertCAFilename, err) + } + if _, err := certutil.ParseCertsPEM(serviceServingCA); err != nil { + return fmt.Errorf("error parsing ca file for Service Serving Certificate Signer: %s: %v", serviceServingCertCAFilename, err) + } + + applyOpenShiftServiceServingCertCA = func(controllerOptions serviceaccountcontroller.TokensControllerOptions) serviceaccountcontroller.TokensControllerOptions { + if len(serviceServingCA) == 0 { + return controllerOptions + } + + // if we have a rootCA bundle add that too. The rootCA will be used when hitting the default master service, since those are signed + // using a different CA by default. The rootCA's key is more closely guarded than ours and if it is compromised, that power could + // be used to change the trusted signers for every pod anyway, so we're already effectively trusting it. + if len(controllerOptions.RootCA) > 0 { + controllerOptions.ServiceServingCA = append(controllerOptions.ServiceServingCA, controllerOptions.RootCA...) + controllerOptions.ServiceServingCA = append(controllerOptions.ServiceServingCA, []byte("\n")...) + } + controllerOptions.ServiceServingCA = append(controllerOptions.ServiceServingCA, serviceServingCA...) + + return controllerOptions + } + + return nil +} + +func getServiceServingCertCAFilename(config map[string]interface{}) string { + controllerConfig, ok := config["controllerConfig"] + if !ok { + sscConfig, ok := config["serviceServingCert"] + if !ok { + return "" + } + sscConfigMap := sscConfig.(map[string]interface{}) + return sscConfigMap["certFile"].(string) + } + controllerConfigMap := controllerConfig.(map[string]interface{}) + sscConfig, ok := controllerConfigMap["serviceServingCert"] + if !ok { + return "" + } + sscConfigMap := sscConfig.(map[string]interface{}) + signerConfig, ok := sscConfigMap["signer"] + if !ok { + return "" + } + signerConfigMap := signerConfig.(map[string]interface{}) + return signerConfigMap["certFile"].(string) +} + +// resolvePath updates the given refs to be absolute paths, relative to the given base directory +func resolvePath(ref *string, base string) error { + // Don't resolve empty paths + if len(*ref) > 0 { + // Don't resolve absolute paths + if !filepath.IsAbs(*ref) { + *ref = filepath.Join(base, *ref) + } + } + + return nil +} diff --git a/cmd/kube-controller-manager/app/patch_test.go b/cmd/kube-controller-manager/app/patch_test.go new file mode 100644 index 0000000000000..fad35b4620e60 --- /dev/null +++ b/cmd/kube-controller-manager/app/patch_test.go @@ -0,0 +1,127 @@ +package app + +import ( + "fmt" + "net/http" + "net/textproto" + "testing" +) + +func TestRejectIfNotReadyHeaderRT(t *testing.T) { + scenarios := []struct { + name string + eligibleUsers []string + currentUser string + expectHeader bool + }{ + { + name: "scenario 1: happy path", + currentUser: "system:serviceaccount:kube-system:generic-garbage-collector", + eligibleUsers: []string{"generic-garbage-collector", "namespace-controller"}, + expectHeader: true, + }, + { + name: "scenario 2: ineligible user", + currentUser: "system:serviceaccount:kube-system:service-account-controller", + eligibleUsers: []string{"generic-garbage-collector", "namespace-controller"}, + expectHeader: false, + }, + } + + for _, scenario := range scenarios { + 
t.Run(scenario.name, func(t *testing.T) { + // set up the test + fakeRT := fakeRTFunc(func(r *http.Request) (*http.Response, error) { + // this is where we validate if the header was set or not + headerSet := func() bool { + if len(r.Header.Get("X-OpenShift-Internal-If-Not-Ready")) > 0 { + return true + } + return false + }() + if scenario.expectHeader && !headerSet { + return nil, fmt.Errorf("%v header wasn't set", textproto.CanonicalMIMEHeaderKey("X-OpenShift-Internal-If-Not-Ready")) + } + if !scenario.expectHeader && headerSet { + return nil, fmt.Errorf("didn't expect %v header", textproto.CanonicalMIMEHeaderKey("X-OpenShift-Internal-If-Not-Ready")) + } + if scenario.expectHeader { + if value := r.Header.Get("X-OpenShift-Internal-If-Not-Ready"); value != "reject" { + return nil, fmt.Errorf("unexpected value %v in the %v header, expected \"reject\"", value, textproto.CanonicalMIMEHeaderKey("X-OpenShift-Internal-If-Not-Ready")) + } + } + return nil, nil + }) + target := newRejectIfNotReadyHeaderRoundTripper(scenario.eligibleUsers)(fakeRT) + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("User-Agent", scenario.currentUser) + + // act and validate + if _, err := target.RoundTrip(req); err != nil { + t.Fatal(err) + } + }) + } +} + +type fakeRTFunc func(r *http.Request) (*http.Response, error) + +func (rt fakeRTFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +func TestMergeCh(t *testing.T) { + testCases := []struct { + name string + chan1 chan struct{} + chan2 chan struct{} + closeFn func(chan struct{}, chan struct{}) + }{ + { + name: "chan1 gets closed", + chan1: make(chan struct{}), + chan2: make(chan struct{}), + closeFn: func(a, b chan struct{}) { + close(a) + }, + }, + { + name: "chan2 gets closed", + chan1: make(chan struct{}), + chan2: make(chan struct{}), + closeFn: func(a, b chan struct{}) { + close(b) + }, + }, + { + name: "both channels get closed", + chan1: make(chan struct{}), + chan2: make(chan struct{}), + closeFn: func(a, b chan struct{}) { + close(a) + close(b) + }, + }, + { + name: "channel receives data and returned channel is closed", + chan1: make(chan struct{}), + chan2: make(chan struct{}), + closeFn: func(a, b chan struct{}) { + a <- struct{}{} + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + go tc.closeFn(tc.chan1, tc.chan2) + merged := mergeCh(tc.chan1, tc.chan2) + if _, ok := <-merged; ok { + t.Fatalf("expected closed channel, got data") + } + }) + } +} diff --git a/cmd/kube-controller-manager/app/testing/testserver.go b/cmd/kube-controller-manager/app/testing/testserver.go index c29ada8f566c2..9d2cd929f2137 100644 --- a/cmd/kube-controller-manager/app/testing/testserver.go +++ b/cmd/kube-controller-manager/app/testing/testserver.go @@ -122,7 +122,8 @@ func StartTestServer(ctx context.Context, customFlags []string) (result TestServ go func(ctx context.Context) { defer close(errCh) - if err := app.Run(ctx, config.Complete()); err != nil { + stopCh := make(chan struct{}) + if err := app.Run(ctx, config.Complete(), stopCh); err != nil { errCh <- err } }(ctx) diff --git a/cmd/kube-controller-manager/names/controller_names.go b/cmd/kube-controller-manager/names/controller_names.go index db88935c4c791..79d2f4c56062d 100644 --- a/cmd/kube-controller-manager/names/controller_names.go +++ b/cmd/kube-controller-manager/names/controller_names.go @@ -77,6 +77,7 @@ const ( PersistentVolumeProtectionController = "persistentvolume-protection-controller" 
TTLAfterFinishedController = "ttl-after-finished-controller" RootCACertificatePublisherController = "root-ca-certificate-publisher-controller" + ServiceCACertificatePublisherController = "service-ca-certificate-publisher-controller" KubeAPIServerClusterTrustBundlePublisherController = "kube-apiserver-serving-clustertrustbundle-publisher-controller" EphemeralVolumeController = "ephemeral-volume-controller" StorageVersionGarbageCollectorController = "storageversion-garbage-collector-controller" diff --git a/cmd/kube-scheduler/app/config/config.go b/cmd/kube-scheduler/app/config/config.go index 774cab62dc666..34803dcac449c 100644 --- a/cmd/kube-scheduler/app/config/config.go +++ b/cmd/kube-scheduler/app/config/config.go @@ -57,6 +57,9 @@ type Config struct { // value, the pod will be moved from unschedulablePods to backoffQ or activeQ. // If this value is empty, the default value (5min) will be used. PodMaxInUnschedulablePodsDuration time.Duration + + // OpenShiftContext is additional context that we need to launch the kube-scheduler for openshift + OpenShiftContext OpenShiftContext } type completedConfig struct { diff --git a/cmd/kube-scheduler/app/config/patch.go b/cmd/kube-scheduler/app/config/patch.go new file mode 100644 index 0000000000000..1f2e3ea2c6d3b --- /dev/null +++ b/cmd/kube-scheduler/app/config/patch.go @@ -0,0 +1,15 @@ +package config + +import ( + "k8s.io/client-go/transport" + + "github.com/openshift/library-go/pkg/monitor/health" +) + +// OpenShiftContext is additional context that we need to launch the kube-scheduler for openshift. +// Basically, this holds our additional config information. +type OpenShiftContext struct { + UnsupportedKubeAPIOverPreferredHost bool + PreferredHostRoundTripperWrapperFn transport.WrapperFunc + PreferredHostHealthMonitor *health.Prober +} diff --git a/cmd/kube-scheduler/app/options/options.go b/cmd/kube-scheduler/app/options/options.go index 95e1c00288436..6c3c1cf7ebd0f 100644 --- a/cmd/kube-scheduler/app/options/options.go +++ b/cmd/kube-scheduler/app/options/options.go @@ -52,6 +52,8 @@ import ( kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/apis/config/validation" netutils "k8s.io/utils/net" + + libgorestclient "github.com/openshift/library-go/pkg/config/client" ) // Options has all the params needed to run a Scheduler @@ -80,6 +82,9 @@ type Options struct { // Flags hold the parsed CLI flags. Flags *cliflag.NamedFlagSets + + // OpenShiftContext is additional context that we need to launch the kube-scheduler for openshift. + OpenShiftContext schedulerappconfig.OpenShiftContext } // NewOptions returns default scheduler app options. 
@@ -196,6 +201,7 @@ func (o *Options) initFlags() { fs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "The path to the configuration file.") fs.StringVar(&o.WriteConfigTo, "write-config-to", o.WriteConfigTo, "If set, write the configuration values to this file and exit.") fs.StringVar(&o.Master, "master", o.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") + fs.BoolVar(&o.OpenShiftContext.UnsupportedKubeAPIOverPreferredHost, "unsupported-kube-api-over-localhost", false, "when set makes KS prefer talking to localhost kube-apiserver (when available) instead of an LB") o.SecureServing.AddFlags(nfs.FlagSet("secure serving")) o.Authentication.AddFlags(nfs.FlagSet("authentication")) @@ -241,6 +247,10 @@ func (o *Options) ApplyTo(logger klog.Logger, c *schedulerappconfig.Config) erro if err != nil { return err } + if c.OpenShiftContext.PreferredHostRoundTripperWrapperFn != nil { + libgorestclient.DefaultServerName(kubeConfig) + kubeConfig.Wrap(c.OpenShiftContext.PreferredHostRoundTripperWrapperFn) + } c.KubeConfig = kubeConfig if err := o.SecureServing.ApplyTo(&c.SecureServing, &c.LoopbackClientConfig); err != nil { @@ -298,6 +308,7 @@ func (o *Options) Config(ctx context.Context) (*schedulerappconfig.Config, error } c := &schedulerappconfig.Config{} + c.OpenShiftContext = o.OpenShiftContext if err := o.ApplyTo(logger, c); err != nil { return nil, err } diff --git a/cmd/kube-scheduler/app/options/patch.go b/cmd/kube-scheduler/app/options/patch.go new file mode 100644 index 0000000000000..71c3c28b1f4bc --- /dev/null +++ b/cmd/kube-scheduler/app/options/patch.go @@ -0,0 +1,11 @@ +package options + +import ( + "k8s.io/klog/v2" + + kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" +) + +func LoadKubeSchedulerConfiguration(logger klog.Logger, file string) (*kubeschedulerconfig.KubeSchedulerConfiguration, error) { + return LoadConfigFromFile(logger, file) +} diff --git a/cmd/kube-scheduler/app/patch.go b/cmd/kube-scheduler/app/patch.go new file mode 100644 index 0000000000000..bcdf678774c4d --- /dev/null +++ b/cmd/kube-scheduler/app/patch.go @@ -0,0 +1,72 @@ +package app + +import ( + "time" + + "k8s.io/klog/v2" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/kubernetes/cmd/kube-scheduler/app/options" + + libgorestclient "github.com/openshift/library-go/pkg/config/client" + "github.com/openshift/library-go/pkg/monitor/health" +) + +func setUpPreferredHostForOpenShift(logger klog.Logger, kubeSchedulerOptions *options.Options) error { + if !kubeSchedulerOptions.OpenShiftContext.UnsupportedKubeAPIOverPreferredHost { + return nil + } + + master := kubeSchedulerOptions.Master + var kubeConfig string + + // We cannot load component config anymore as the options are not being initialized. + // if there was no kubeconfig specified we won't be able to get cluster info. + // in that case try to load the configuration and read kubeconfig directly from it if it was provided. 
+ if len(kubeSchedulerOptions.ConfigFile) > 0 { + cfg, err := options.LoadKubeSchedulerConfiguration(logger, kubeSchedulerOptions.ConfigFile) + if err != nil { + return err + } + kubeConfig = cfg.ClientConnection.Kubeconfig + } + + config, err := clientcmd.BuildConfigFromFlags(master, kubeConfig) + if err != nil { + return err + } + libgorestclient.DefaultServerName(config) + + targetProvider := health.StaticTargetProvider{"localhost:6443"} + kubeSchedulerOptions.OpenShiftContext.PreferredHostHealthMonitor, err = health.New(targetProvider, createRestConfigForHealthMonitor(config)) + if err != nil { + return err + } + kubeSchedulerOptions.OpenShiftContext.PreferredHostHealthMonitor. + WithHealthyProbesThreshold(3). + WithUnHealthyProbesThreshold(5). + WithProbeInterval(5 * time.Second). + WithProbeResponseTimeout(2 * time.Second). + WithMetrics(health.Register(legacyregistry.MustRegister)) + + kubeSchedulerOptions.OpenShiftContext.PreferredHostRoundTripperWrapperFn = libgorestclient.NewPreferredHostRoundTripper(func() string { + healthyTargets, _ := kubeSchedulerOptions.OpenShiftContext.PreferredHostHealthMonitor.Targets() + if len(healthyTargets) == 1 { + return healthyTargets[0] + } + return "" + }) + + kubeSchedulerOptions.Authentication.WithCustomRoundTripper(kubeSchedulerOptions.OpenShiftContext.PreferredHostRoundTripperWrapperFn) + kubeSchedulerOptions.Authorization.WithCustomRoundTripper(kubeSchedulerOptions.OpenShiftContext.PreferredHostRoundTripperWrapperFn) + return nil +} + +func createRestConfigForHealthMonitor(restConfig *rest.Config) *rest.Config { + restConfigCopy := *restConfig + rest.AddUserAgent(&restConfigCopy, "kube-scheduler-health-monitor") + + return &restConfigCopy +} diff --git a/cmd/kube-scheduler/app/server.go b/cmd/kube-scheduler/app/server.go index 1785bbdcc91b2..7c6568b7d0201 100644 --- a/cmd/kube-scheduler/app/server.go +++ b/cmd/kube-scheduler/app/server.go @@ -149,6 +149,11 @@ func runCommand(cmd *cobra.Command, opts *options.Options, registryOptions ...Op cancel() }() + logger := klog.FromContext(ctx) + if err := setUpPreferredHostForOpenShift(logger, opts); err != nil { + return err + } + cc, sched, err := Setup(ctx, opts, registryOptions...) if err != nil { return err @@ -167,6 +172,11 @@ func Run(ctx context.Context, cc *schedulerserverconfig.CompletedConfig, sched * logger.Info("Golang settings", "GOGC", os.Getenv("GOGC"), "GOMAXPROCS", os.Getenv("GOMAXPROCS"), "GOTRACEBACK", os.Getenv("GOTRACEBACK")) + // start the localhost health monitor early so that it can be used by the LE client + if cc.OpenShiftContext.PreferredHostHealthMonitor != nil { + go cc.OpenShiftContext.PreferredHostHealthMonitor.Run(ctx) + } + // Configz registration. 
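The wrapper built above consults the health monitor on every request and leaves the request alone whenever no target is healthy. A minimal sketch of that idea, assuming nothing about library-go's actual implementation:

```go
package example

import "net/http"

// preferredHostRT rewrites the request host while hostFn reports a healthy
// preferred target (e.g. "localhost:6443") and leaves the request untouched
// when hostFn returns "".
type preferredHostRT struct {
	delegate http.RoundTripper
	hostFn   func() string
}

func (p preferredHostRT) RoundTrip(req *http.Request) (*http.Response, error) {
	if host := p.hostFn(); host != "" {
		req = req.Clone(req.Context())
		req.URL.Host = host
		req.Host = host
	}
	return p.delegate.RoundTrip(req)
}
```

This is presumably also why the patch calls libgorestclient.DefaultServerName before wrapping: once requests can be re-pointed at localhost, certificate validation needs to keep matching the original server name.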
if cz, err := configz.New("componentconfig"); err != nil { return fmt.Errorf("unable to register configz: %s", err) @@ -325,7 +335,7 @@ func buildHandlerChain(handler http.Handler, authn authenticator.Request, authz handler = genericapifilters.WithAuthentication(handler, authn, failedHandler, nil, nil) handler = genericapifilters.WithRequestInfo(handler, requestInfoResolver) handler = genericapifilters.WithCacheControl(handler) - handler = genericfilters.WithHTTPLogging(handler) + handler = genericfilters.WithHTTPLogging(handler, nil) handler = genericfilters.WithPanicRecovery(handler, requestInfoResolver) return handler diff --git a/cmd/kubelet/app/auth.go b/cmd/kubelet/app/auth.go index b5117a72c8121..1000f319ae89e 100644 --- a/cmd/kubelet/app/auth.go +++ b/cmd/kubelet/app/auth.go @@ -62,6 +62,7 @@ func BuildAuth(nodeName types.NodeName, client clientset.Interface, config kubel if err != nil { return nil, nil, err } + authorizer = wrapAuthorizerWithMetricsScraper(authorizer) return server.NewKubeletAuth(authenticator, attributes, authorizer), runAuthenticatorCAReload, nil } diff --git a/cmd/kubelet/app/options/globalflags_linux.go b/cmd/kubelet/app/options/globalflags_linux.go index e75e65ec37cd0..c2ba0d1bce39d 100644 --- a/cmd/kubelet/app/options/globalflags_linux.go +++ b/cmd/kubelet/app/options/globalflags_linux.go @@ -42,6 +42,7 @@ func addCadvisorFlags(fs *pflag.FlagSet) { // e2e node tests rely on this register(global, local, "housekeeping_interval") + register(global, local, "max_housekeeping_interval") // These flags were implicit from cadvisor, and are mistakes that should be registered deprecated: const deprecated = "This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed." diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index a6efdadb7a9c5..2aa2fe5879668 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -155,6 +155,9 @@ func ValidateKubeletFlags(f *KubeletFlags) error { invalidLabelErrs := make(map[string][]string) for k, v := range f.NodeLabels { if isKubernetesLabel(k) && !kubeletapis.IsKubeletLabel(k) { + if kubeletapis.IsForbiddenOpenshiftLabel(k) { + continue + } unknownLabels.Insert(k) } diff --git a/cmd/kubelet/app/patch_auth.go b/cmd/kubelet/app/patch_auth.go new file mode 100644 index 0000000000000..04d860cf5e6d5 --- /dev/null +++ b/cmd/kubelet/app/patch_auth.go @@ -0,0 +1,17 @@ +package app + +import ( + "github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/authorization/union" +) + +// wrapAuthorizerWithMetricsScraper adds an authorizer that always approves the openshift metrics scraper. 
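union.New evaluates its delegates in order and stops at the first Allow or Deny, so a matching scrape request never reaches the SubjectAccessReview-backed authorizer. A sketch of what the hardcoded side looks like; the service-account identity below is an assumption, and the real rule lives in library-go's hardcodedauthorizer package:

```go
package example

import (
	"context"

	"k8s.io/apiserver/pkg/authorization/authorizer"
)

// metricsScraperAuthorizer allows exactly one request shape and otherwise
// returns NoOpinion so the next authorizer in the union gets consulted.
type metricsScraperAuthorizer struct{}

func (metricsScraperAuthorizer) Authorize(_ context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
	if a.GetUser() == nil {
		return authorizer.DecisionNoOpinion, "", nil
	}
	if !a.IsResourceRequest() &&
		a.GetVerb() == "get" &&
		a.GetPath() == "/metrics" &&
		a.GetUser().GetName() == "system:serviceaccount:openshift-monitoring:prometheus-k8s" { // assumed identity
		return authorizer.DecisionAllow, "requests from the metrics scraper are always allowed", nil
	}
	return authorizer.DecisionNoOpinion, "", nil
}
```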
+// This eliminates an unnecessary SAR for scraping metrics and enables metrics gathering when network access +// to the kube-apiserver is interrupted. +func wrapAuthorizerWithMetricsScraper(authz authorizer.Authorizer) authorizer.Authorizer { + return union.New( + hardcodedauthorizer.NewHardCodedMetricsAuthorizer(), + authz, + ) +} diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index cfcf6e7d5cdcf..779542466449f 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -756,6 +756,12 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend cgroupRoots = append(cgroupRoots, s.SystemCgroups) } + + // CARRY: Monitor extra cgroups that are specific to OpenShift deployments + // Adding them here since there is no way to handle this via configuration atm + // - ovs.slice is configured on clusters that use the NTO's PerformanceProfile and only exists together + // with system-cpu-reserved + cgroupRoots = append(cgroupRoots, "/ovs.slice") + if kubeDeps.CAdvisorInterface == nil { imageFsInfoProvider := cadvisor.NewImageFsInfoProvider(s.ContainerRuntimeEndpoint) kubeDeps.CAdvisorInterface, err = cadvisor.New(imageFsInfoProvider, s.RootDirectory, cgroupRoots, cadvisor.UsingLegacyCadvisorStats(s.ContainerRuntimeEndpoint), s.LocalStorageCapacityIsolation) diff --git a/cmd/watch-termination/main.go b/cmd/watch-termination/main.go new file mode 100644 index 0000000000000..aa3aa8800854b --- /dev/null +++ b/cmd/watch-termination/main.go @@ -0,0 +1,366 @@ +package main + +import ( + "context" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "os/signal" + "strings" + "sync" + "syscall" + "time" + + "gopkg.in/natefinch/lumberjack.v2" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" +) + +func main() { + os.Exit(run()) +} + +func run() int { + terminationLog := flag.String("termination-log-file", "", "Write logs after SIGTERM to this file (in addition to stderr)") + terminationLock := flag.String("termination-touch-file", "", "Touch this file on SIGTERM and delete on termination") + processOverlapDetectionFile := flag.String("process-overlap-detection-file", "", "This file is present when the kube-apiserver initialization timed out while waiting for kubelet to terminate old process") + kubeconfigPath := flag.String("kubeconfig", "", "Optional kubeconfig used to create events") + gracefulTerminationPeriod := flag.Duration("graceful-termination-duration", 105*time.Second, "The duration of the graceful termination period, e.g. 105s") + + klog.InitFlags(nil) + flag.Set("v", "9") + + // never log to stderr, only through our termination log writer (which sends it also to stderr) + flag.Set("logtostderr", "false") + flag.Set("stderrthreshold", "99") + + flag.Parse() + args := flag.CommandLine.Args() + + if len(args) == 0 { + fmt.Println("Missing command line") + return 1 + } + + // use special tee-like writer when termination log is set + termCh := make(chan struct{}) + var stderr io.Writer = os.Stderr + var terminationLogger *terminationFileWriter + if len(*terminationLog) > 0 { + terminationLogger = &terminationFileWriter{ + Writer: os.Stderr, + fn: *terminationLog, + startFileLoggingCh: termCh, + } + stderr = terminationLogger + + // do the klog file writer dance: klog writes to all outputs of lower + // severity. No idea why. 
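For context on the klog behavior referenced here: a message logged at ERROR severity is written to the ERROR, WARNING and INFO outputs, so wiring a writer to every severity would print each line several times. Routing everything through the INFO output alone, roughly:

```go
package example

import (
	"io"

	"k8s.io/klog/v2"
)

// routeKlogOnce makes klog emit every message to w exactly once: the INFO
// output receives messages of all severities, and the defaults are discarded.
func routeKlogOnce(w io.Writer) {
	klog.SetOutput(io.Discard)
	klog.SetOutputBySeverity("INFO", w)
}
```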
So we discard for anything other than info. + // Otherwise, we would see errors multiple times. + klog.SetOutput(ioutil.Discard) + klog.SetOutputBySeverity("INFO", stderr) + } + + var client kubernetes.Interface + if len(*kubeconfigPath) > 0 { + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: *kubeconfigPath}, &clientcmd.ConfigOverrides{}) + if cfg, err := loader.ClientConfig(); err != nil { + klog.Errorf("failed to load kubeconfig %q: %v", *kubeconfigPath, err) + return 1 + } else { + client = kubernetes.NewForConfigOrDie(cfg) + } + } + + if processOverlapDetectionFile != nil && len(*processOverlapDetectionFile) > 0 { + var deleteDetectionFileOnce sync.Once + + if _, err := os.Stat(*processOverlapDetectionFile); err != nil && !os.IsNotExist(err) { + klog.Errorf("failed to read process overlap detection file %q: %v", *processOverlapDetectionFile, err) + return 1 + } else if err == nil { + ref, err := eventReference() + if err != nil { + klog.Errorf("failed to get event target: %v", err) + return 1 + } + go func() { + defer deleteDetectionFileOnce.Do(func() { + if err := os.Remove(*processOverlapDetectionFile); err != nil { + klog.Warningf("Failed to remove process overlap termination file %q: %v", *processOverlapDetectionFile, err) + } + }) + if err := retry.OnError(retry.DefaultBackoff, func(err error) bool { + select { + case <-termCh: + // stop retry on termination + return false + default: + } + // every error is retriable + return true + }, func() error { + return eventf(client.CoreV1().Events(ref.Namespace), *ref, corev1.EventTypeWarning, "TerminationProcessOverlapDetected", "The kube-apiserver initialization timed out while waiting for kubelet to terminate old process") + }); err != nil { + klog.Warning(err) + } + }() + } + } + + // touch file early. If the file is not removed on termination, we are not + // terminating cleanly via SIGTERM. 
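The touch-file handshake that follows distills to a small, reusable pattern: create a marker when the process starts, remove it only on the clean shutdown path, and treat its presence at the next startup as evidence of a non-graceful termination. A self-contained sketch (paths and messages are illustrative):

```go
package example

import (
	"fmt"
	"os"
)

// armTerminationLock reports whether the previous run crashed and re-arms
// the marker for the current run. The returned cleanup must run on the
// graceful shutdown path only.
func armTerminationLock(lockPath string) (cleanup func(), err error) {
	if st, statErr := os.Stat(lockPath); statErr == nil {
		// Leftover marker: the previous run never reached its cleanup.
		fmt.Printf("previous process (started %s) did not terminate gracefully\n", st.ModTime())
		_ = os.Remove(lockPath)
	}
	f, err := os.Create(lockPath) // the "touch"
	if err != nil {
		return nil, err
	}
	if err := f.Close(); err != nil {
		return nil, err
	}
	return func() { _ = os.Remove(lockPath) }, nil
}
```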
+ if len(*terminationLock) > 0 { + ref, err := eventReference() + if err != nil { + klog.Errorf("failed to get event target: %v", err) + return 1 + } + + if st, err := os.Stat(*terminationLock); err == nil { + podName := "unknown" + if v := os.Getenv("POD_NAME"); len(v) > 0 { + podName = v // pod name is always the same for static pods + } + msg := fmt.Sprintf("Previous pod %s started at %s did not terminate gracefully", podName, st.ModTime().String()) + + klog.Warning(msg) + _, _ = terminationLogger.WriteToTerminationLog([]byte(msg + "\n")) + + go retry.OnError(retry.DefaultBackoff, func(err error) bool { + select { + case <-termCh: + // stop retry on termination + return false + default: + } + // every error is retriable + return true + }, func() error { + return eventf(client.CoreV1().Events(ref.Namespace), *ref, corev1.EventTypeWarning, "NonGracefulTermination", msg) + }) + + klog.Infof("Deleting old termination lock file %q", *terminationLock) + if err := os.Remove(*terminationLock); err != nil { + klog.Errorf("Old termination lock file deletion failed: %v", err) + } + } + + // separation to see where the new one is starting + _, _ = terminationLogger.WriteToTerminationLog([]byte("---\n")) + + klog.Infof("Touching termination lock file %q", *terminationLock) + if err := touch(*terminationLock); err != nil { + klog.Infof("Error touching %s: %v", *terminationLock, err) + // keep going + } + + var deleteLockOnce sync.Once + + if *gracefulTerminationPeriod > 2*time.Second { + go func() { + <-termCh + <-time.After(*gracefulTerminationPeriod - 2*time.Second) + + deleteLockOnce.Do(func() { + klog.Infof("Graceful termination time nearly passed and kube-apiserver has still not terminated. Deleting termination lock file %q to avoid a false positive.", *terminationLock) + if err := os.Remove(*terminationLock); err != nil { + klog.Errorf("Termination lock file deletion failed: %v", err) + } + + if err := eventf(client.CoreV1().Events(ref.Namespace), *ref, corev1.EventTypeWarning, "GracefulTerminationTimeout", "kube-apiserver did not terminate within %s", *gracefulTerminationPeriod); err != nil { + klog.Error(err) + } + }) + }() + } + + defer deleteLockOnce.Do(func() { + klog.Infof("Deleting termination lock file %q", *terminationLock) + if err := os.Remove(*terminationLock); err != nil { + klog.Errorf("Termination lock file deletion failed: %v", err) + } + }) + } + + cmd := exec.Command(args[0], args[1:]...) + cmd.Stdout = os.Stdout + cmd.Stderr = stderr + + // forward SIGTERM and SIGINT to child + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for s := range sigCh { + select { + case <-termCh: + default: + close(termCh) + } + + klog.Infof("Received signal %s. Forwarding to sub-process %q.", s, args[0]) + + cmd.Process.Signal(s) + } + }() + + klog.Infof("Launching sub-process %q", cmd) + rc := 0 + if err := cmd.Run(); err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + rc = exitError.ExitCode() + } else { + klog.Infof("Failed to launch %s: %v", args[0], err) + return 255 + } + } + + // remove signal handling + signal.Stop(sigCh) + close(sigCh) + wg.Wait() + + klog.Infof("Termination finished with exit code %d", rc) + return rc +} + +// terminationFileWriter forwards everything to the embedded writer. When +// startFileLoggingCh is closed, everything is appended to the given file name +// in addition. 
+type terminationFileWriter struct { + io.Writer + fn string + startFileLoggingCh <-chan struct{} + + logger io.Writer +} + +func (w *terminationFileWriter) WriteToTerminationLog(bs []byte) (int, error) { + if w == nil { + return len(bs), nil + } + + if w.logger == nil { + l := &lumberjack.Logger{ + Filename: w.fn, + MaxSize: 100, + MaxBackups: 3, + MaxAge: 28, + Compress: false, + } + w.logger = l + fmt.Fprintf(os.Stderr, "Copying termination logs to %q\n", w.fn) + } + if n, err := w.logger.Write(bs); err != nil { + return n, err + } else if n != len(bs) { + return n, io.ErrShortWrite + } + return len(bs), nil +} + +func (w *terminationFileWriter) Write(bs []byte) (int, error) { + // temporary hack to avoid logging sensitive tokens. + // TODO: drop when we have moved to a non-sensitive storage format + if strings.Contains(string(bs), "URI=\"/apis/oauth.openshift.io/v1/oauthaccesstokens/") || strings.Contains(string(bs), "URI=\"/apis/oauth.openshift.io/v1/oauthauthorizetokens/") { + return len(bs), nil + } + + select { + case <-w.startFileLoggingCh: + if n, err := w.WriteToTerminationLog(bs); err != nil { + return n, err + } + default: + } + + return w.Writer.Write(bs) +} + +func touch(fn string) error { + _, err := os.Stat(fn) + if os.IsNotExist(err) { + file, err := os.Create(fn) + if err != nil { + return err + } + defer file.Close() + return nil + } + + currentTime := time.Now().Local() + return os.Chtimes(fn, currentTime, currentTime) +} + +func eventf(client corev1client.EventInterface, ref corev1.ObjectReference, eventType, reason, messageFmt string, args ...interface{}) error { + t := metav1.Time{Time: time.Now()} + host, _ := os.Hostname() // explicitly ignore error. Empty host is fine + + e := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: ref.Namespace, + }, + InvolvedObject: ref, + Reason: reason, + Message: fmt.Sprintf(messageFmt, args...), + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventType, + Source: corev1.EventSource{Component: "apiserver", Host: host}, + } + + _, err := client.Create(context.TODO(), e, metav1.CreateOptions{}) + + if err == nil { + klog.V(2).Infof("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) + } + + return err +} + +func eventReference() (*corev1.ObjectReference, error) { + ns := os.Getenv("POD_NAMESPACE") + pod := os.Getenv("POD_NAME") + if len(ns) == 0 && len(pod) > 0 { + serviceAccountNamespaceFile := "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + if _, err := os.Stat(serviceAccountNamespaceFile); err == nil { + bs, err := ioutil.ReadFile(serviceAccountNamespaceFile) + if err != nil { + return nil, err + } + ns = string(bs) + } + } + if len(ns) == 0 { + pod = "" + ns = "kube-system" + } + if len(pod) == 0 { + return &corev1.ObjectReference{ + Kind: "Namespace", + Name: ns, + APIVersion: "v1", + }, nil + } + + return &corev1.ObjectReference{ + Kind: "Pod", + Namespace: ns, + Name: pod, + APIVersion: "v1", + }, nil +} diff --git a/go.mod b/go.mod index 5c511d5740334..75e6adef4f109 100644 --- a/go.mod +++ b/go.mod @@ -48,10 +48,15 @@ require ( github.com/moby/sys/userns v0.1.0 github.com/mrunalp/fileutils v0.5.1 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 - github.com/onsi/ginkgo/v2 v2.21.0 + github.com/onsi/ginkgo/v2 v2.20.2 github.com/onsi/gomega v1.35.1 github.com/opencontainers/runc v1.2.1 github.com/opencontainers/selinux v1.11.1 + github.com/openshift-eng/openshift-tests-extension 
v0.0.0-20241121212100-2e43ae5f86e2 + github.com/openshift/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/apiserver-library-go v0.0.0-20241212055705-41777f979e50 + github.com/openshift/client-go v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 github.com/prometheus/client_golang v1.19.1 @@ -83,23 +88,25 @@ require ( golang.org/x/term v0.25.0 golang.org/x/time v0.7.0 golang.org/x/tools v0.26.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 - google.golang.org/grpc v1.65.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 + google.golang.org/grpc v1.67.0 google.golang.org/protobuf v1.35.1 gopkg.in/evanphx/json-patch.v4 v4.12.0 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/square/go-jose.v2 v2.6.0 - k8s.io/api v0.0.0 - k8s.io/apiextensions-apiserver v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/apiserver v0.0.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.32.0 + k8s.io/apiextensions-apiserver v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 k8s.io/cli-runtime v0.0.0 - k8s.io/client-go v0.0.0 + k8s.io/client-go v0.32.0 k8s.io/cloud-provider v0.0.0 k8s.io/cluster-bootstrap v0.0.0 - k8s.io/code-generator v0.0.0 - k8s.io/component-base v0.0.0 - k8s.io/component-helpers v0.0.0 - k8s.io/controller-manager v0.0.0 + k8s.io/code-generator v0.32.0 + k8s.io/component-base v0.32.0 + k8s.io/component-helpers v0.32.0-rc.1 + k8s.io/controller-manager v0.32.0-rc.1 k8s.io/cri-api v0.0.0 k8s.io/cri-client v0.0.0 k8s.io/csi-translation-lib v0.0.0 @@ -107,8 +114,8 @@ require ( k8s.io/endpointslice v0.0.0 k8s.io/externaljwt v0.0.0 k8s.io/klog/v2 v2.130.1 - k8s.io/kms v0.0.0 - k8s.io/kube-aggregator v0.0.0 + k8s.io/kms v0.32.0 + k8s.io/kube-aggregator v0.32.0 k8s.io/kube-controller-manager v0.0.0 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f k8s.io/kube-proxy v0.0.0 @@ -129,6 +136,7 @@ require ( require ( cel.dev/expr v0.18.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect @@ -150,7 +158,9 @@ require ( github.com/fatih/camelcase v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect github.com/go-errors/errors v1.4.2 // indirect + github.com/go-ldap/ldap/v3 v3.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -205,14 +215,14 @@ require ( go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/text v0.19.0 // indirect google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect + 
google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect @@ -223,6 +233,11 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/apiserver-library-go => github.com/dusk125/apiserver-library-go v0.0.0-20241212055705-41777f979e50 + github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ./staging/src/k8s.io/api k8s.io/apiextensions-apiserver => ./staging/src/k8s.io/apiextensions-apiserver k8s.io/apimachinery => ./staging/src/k8s.io/apimachinery diff --git a/go.sum b/go.sum index 21e2dbed8cbbf..be9e43a5f30c6 100644 --- a/go.sum +++ b/go.sum @@ -30,7 +30,7 @@ cloud.google.com/go/cloudbuild v1.15.0/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhV cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/contactcenterinsights v1.12.1/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= cloud.google.com/go/container v1.29.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= @@ -123,6 +123,8 @@ cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzS cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e h1:ZU22z/2YRFLyf/P4ZwUYSdNCWsMEI0VeyrFoI2rAhJQ= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= @@ -134,9 +136,11 @@ github.com/Microsoft/hnslib v0.0.8 h1:EBrIiRB7i/UYIXEC2yw22dn+RLzOmsc5S0bw2xf0Qu github.com/Microsoft/hnslib v0.0.8/go.mod h1:EYveQJlhKh2obmEIRB3uKN6dBd9pj1frPsrTGFppKuk= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= 
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs= @@ -175,7 +179,7 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/container-storage-interface/spec v1.9.0 h1:zKtX4STsq31Knz3gciCYCi1SXtO2HJDecIjDVboYavY= @@ -212,14 +216,25 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU= github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e h1:VtQff4aOjCdx31u6zrt9hPzFx2Ullu1yep4x8bqrRqg= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c= +github.com/dusk125/apiserver-library-go v0.0.0-20241212055705-41777f979e50 h1:Orim/dwZOmFyeYfuqwaXc5ZA/S29Yx95wJenbxECpI4= +github.com/dusk125/apiserver-library-go v0.0.0-20241212055705-41777f979e50/go.mod 
h1:w4YCdvmWwxudrJnyPg5Sh3aXNXunCdvTZlYQk9m9H6U= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 h1:PC6mGKxev1xAQV4YniBkEzGXkK+faFjw/N+RNbun25Y= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= @@ -227,27 +242,34 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= 
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.3 h1:JCKUtJPIcyOuG7ctGabLKMgIlKnGumD/iGjuWeEruDI= +github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -276,7 +298,7 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -284,6 +306,12 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cadvisor v0.51.0 h1:BspqSPdZoLKrnvuZNOvM/KiJ/A+RdixwagN20n+2H8k= @@ -308,6 +336,7 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= 
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -321,7 +350,9 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2 h1:i2fYnDurfLlJH8AyyMOnkLHnHeP8Ff/DDpuZA/D3bPo= @@ -390,8 +421,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -404,17 +433,25 @@ github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift-eng/openshift-tests-extension v0.0.0-20241121212100-2e43ae5f86e2 h1:3vmVPLYkx16VCiWgoaTa0I0T9K1uqrjk2hPbma/dcIw= +github.com/openshift-eng/openshift-tests-extension v0.0.0-20241121212100-2e43ae5f86e2/go.mod h1:1OhaNsaU9vuy/dlYZLEve7bgE2Ed+yTV5VSbYvGXt4s= +github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -424,6 +461,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -521,6 +559,7 @@ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVf go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -533,6 +572,7 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -556,6 +596,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net 
v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= @@ -581,6 +622,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -621,18 +663,18 @@ google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.65.0 
h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -648,6 +690,7 @@ gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= @@ -671,6 +714,7 @@ sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/knftables v0.0.17 h1:wGchTyRF/iGTIjd+vRaR1m676HM7jB8soFtyr/148ic= sigs.k8s.io/knftables v0.0.17/go.mod h1:f/5ZLKYEUPUhVjUCg6l80ACdL7CIIyeL0DxfgojGRTk= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= sigs.k8s.io/kustomize/cmd/config v0.15.0/go.mod h1:Jq57b0nPaoYUlOqg//0JtAh6iibboqMcfbtCYoWPM00= diff --git a/go.work.sum b/go.work.sum index 78665c9725dca..2ded0ec8b774b 100644 --- a/go.work.sum +++ b/go.work.sum @@ -24,7 +24,7 @@ cloud.google.com/go/cloudbuild v1.15.0 h1:9IHfEMWdCklJ1cwouoiQrnxmP0q3pH7JUt8Hqx cloud.google.com/go/clouddms v1.7.3 h1:xe/wJKz55VO1+L891a1EG9lVUgfHr9Ju/I3xh1nwF84= cloud.google.com/go/cloudtasks v1.12.4 h1:5xXuFfAjg0Z5Wb81j2GAbB3e0bwroCeSF+5jBn/L650= cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= cloud.google.com/go/contactcenterinsights v1.12.1 h1:EiGBeejtDDtr3JXt9W7xlhXyZ+REB5k2tBgVPVtmNb0= cloud.google.com/go/container v1.29.0 h1:jIltU529R2zBFvP8rhiG1mgeTcnT27KhU0H/1d6SQRg= cloud.google.com/go/containeranalysis v0.11.3 h1:5rhYLX+3a01drpREqBZVXR9YmWH45RnML++8NsCtuD8= @@ -116,9 +116,11 @@ cloud.google.com/go/webrisk v1.9.4 h1:iceR3k0BCRZgf2D/NiKviVMFfuNC9LmeNLtxUFRB/w cloud.google.com/go/websecurityscanner v1.6.4 h1:5Gp7h5j7jywxLUp6NTpjNPkgZb3ngl0tUSw6ICWvtJQ= cloud.google.com/go/workflows v1.12.3 h1:qocsqETmLAl34mSa01hKZjcqAvt699gaoFbooGGMvaM= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2 h1:x8Brv0YNEe6jY3V/hQglIG2nd8g5E2Zj5ubGKkPQctQ= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= 
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= github.com/aws/aws-sdk-go-v2 v1.30.1 h1:4y/5Dvfrhd1MxRDD77SrfsDaj8kUkkljU7XE83NPV+o= github.com/aws/aws-sdk-go-v2/config v1.27.24 h1:NM9XicZ5o1CBU/MZaHwFtimRpWx9ohAUAqkG6AqSqPo= github.com/aws/aws-sdk-go-v2/credentials v1.17.24 h1:YclAsrnb1/GTQNt2nzv+756Iw4mF8AOzcDfweWwwm/M= @@ -138,20 +140,35 @@ github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= -github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca h1:yGaIDzPWkgU+yRvI2x/rGdOU1hl6bLZzm0mETEUSHwk= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= +github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac h1:Q0Jsdxl5jbxouNs1TQYt0gxesYMU4VXRbsTlgDloZ50= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82 h1:EvokxLQsaaQjcWVWSV38221VAK7qc2zhaO17bKys/18= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb h1:NcVXNHJrvrcAv8SVYKzKT2zwtEXU1DK2J+azsK7oz2A= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029 h1:8jtTdc+Nfj9AR+0soOeia9UZSvYBvETVHZrugUowJ7M= +github.com/gonum/lapack 
v0.0.0-20181123203213-e4cdc5a0bff9 h1:7qnwS9+oeSiOIsiUMajT+0R7HR6hw5NegnKPmn/94oI= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9 h1:V2IgdyerlBa/MxaEFRbV5juy/C3MGdj4ePi+g6ePIp4= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465 h1:KwWnWVWCNtNq/ewIX7HIKnELmEx2nDP42yskD/pi7QE= +github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= @@ -163,8 +180,13 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09 github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660 h1:F0zE2bmdVvaEd18VXuGYQdJJ1FYJu4MIDW9PYZWc9No= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= +github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0 h1:AHzMWDxNiAVscJL6+4wkvFRTpMnJqiaZFEKA/osaBXE= +github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= @@ -173,12 +195,11 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8 github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457 h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek= sigs.k8s.io/kustomize/cmd/config v0.15.0 h1:WkdY8V2+8J+W00YbImXa2ke9oegfrHH79e+kywW7EdU= diff --git a/hack/lib/golang.sh 
b/hack/lib/golang.sh index 6b673a129259d..ffc7c24e5bc17 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -78,6 +78,9 @@ kube::golang::server_targets() { staging/src/k8s.io/kube-aggregator staging/src/k8s.io/apiextensions-apiserver cluster/gce/gci/mounter + cmd/watch-termination + openshift-hack/cmd/k8s-tests + openshift-hack/cmd/k8s-tests-ext ) echo "${targets[@]}" } @@ -316,20 +319,7 @@ readonly KUBE_ALL_TARGETS=( ) readonly KUBE_ALL_BINARIES=("${KUBE_ALL_TARGETS[@]##*/}") -readonly KUBE_STATIC_BINARIES=( - apiextensions-apiserver - kube-aggregator - kube-apiserver - kube-controller-manager - kube-scheduler - kube-proxy - kube-log-runner - kubeadm - kubectl - kubectl-convert - kubemark - mounter -) +readonly KUBE_STATIC_BINARIES=() # Fully-qualified package names that we want to instrument for coverage information. readonly KUBE_COVERAGE_INSTRUMENTED_PACKAGES=( @@ -508,7 +498,7 @@ kube::golang::set_platform_envs() { # if CC is defined for platform then always enable it ccenv=$(echo "$platform" | awk -F/ '{print "KUBE_" toupper($1) "_" toupper($2) "_CC"}') - if [ -n "${!ccenv-}" ]; then + if [ -n "${!ccenv-}" ]; then export CGO_ENABLED=1 export CC="${!ccenv}" fi @@ -519,27 +509,6 @@ kube::golang::set_platform_envs() { # env-var GO_VERSION is the desired go version to use, downloading it if needed (defaults to content of .go-version) # env-var FORCE_HOST_GO set to a non-empty value uses the go version in the $PATH and skips ensuring $GO_VERSION is used kube::golang::internal::verify_go_version() { - # default GO_VERSION to content of .go-version - GO_VERSION="${GO_VERSION:-"$(cat "${KUBE_ROOT}/.go-version")"}" - if [ "${GOTOOLCHAIN:-auto}" != 'auto' ]; then - # no-op, just respect GOTOOLCHAIN - : - elif [ -n "${FORCE_HOST_GO:-}" ]; then - # ensure existing host version is used, like before GOTOOLCHAIN existed - export GOTOOLCHAIN='local' - else - # otherwise, we want to ensure the go version matches GO_VERSION - GOTOOLCHAIN="go${GO_VERSION}" - export GOTOOLCHAIN - # if go is either not installed or too old to respect GOTOOLCHAIN then use gimme - if ! (command -v go >/dev/null && [ "$(go version | cut -d' ' -f3)" = "${GOTOOLCHAIN}" ]); then - export GIMME_ENV_PREFIX=${GIMME_ENV_PREFIX:-"${KUBE_OUTPUT}/.gimme/envs"} - export GIMME_VERSION_PREFIX=${GIMME_VERSION_PREFIX:-"${KUBE_OUTPUT}/.gimme/versions"} - # eval because the output of this is shell to set PATH etc. - eval "$("${KUBE_ROOT}/third_party/gimme/gimme" "${GO_VERSION}")" - fi - fi - if [[ -z "$(command -v go)" ]]; then kube::log::usage_from_stdin </dev/null 2>&1; then - go -C "${KUBE_ROOT}/hack/tools" install ./ncpu || echo "Will not automatically set GOMAXPROCS" + # shellcheck disable=SC2164 + pushd "${KUBE_ROOT}/hack/tools" >/dev/null + go install -mod=readonly ./ncpu || echo "Will not automatically set GOMAXPROCS" + # shellcheck disable=SC2164 + popd >/dev/null fi if command -v ncpu >/dev/null 2>&1; then GOMAXPROCS=$(ncpu) diff --git a/hack/lib/version.sh b/hack/lib/version.sh index ffd8bc3789a09..ae3853df3841a 100644 --- a/hack/lib/version.sh +++ b/hack/lib/version.sh @@ -37,6 +37,8 @@ kube::version::get_version_vars() { return fi + KUBE_GIT_VERSION=$(sed -rn 's/.*io.openshift.build.versions="kubernetes=(1.[0-9]+.[0-9]+)"/v\1/p' openshift-hack/images/hyperkube/Dockerfile.rhel) + # If the kubernetes source was exported through git archive, then # we likely don't have a git tree, but these magic values may be filled in. 
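The KUBE_GIT_VERSION override added above derives the reported version from the hyperkube image label rather than from the git tree. A minimal Go sketch of the same extraction, assuming a hypothetical LABEL line of the form that the sed expression targets in Dockerfile.rhel:

    package main

    import (
    	"fmt"
    	"regexp"
    )

    // Mirrors the sed expression above: capture the kubernetes version from the
    // io.openshift.build.versions image label and prefix it with "v".
    var versionRe = regexp.MustCompile(`io\.openshift\.build\.versions="kubernetes=(1\.[0-9]+\.[0-9]+)"`)

    func main() {
    	// Hypothetical label line as it might appear in Dockerfile.rhel.
    	line := `LABEL io.openshift.build.versions="kubernetes=1.31.1"`
    	if m := versionRe.FindStringSubmatch(line); m != nil {
    		fmt.Println("v" + m[1]) // prints: v1.31.1
    	}
    }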
# shellcheck disable=SC2016,SC2050 diff --git a/hack/make-rules/test.sh b/hack/make-rules/test.sh index afddf8df9825d..d2326809a342f 100755 --- a/hack/make-rules/test.sh +++ b/hack/make-rules/test.sh @@ -52,7 +52,8 @@ kube::test::find_go_packages() { -e '^k8s.io/kubernetes/test/e2e$' \ -e '^k8s.io/kubernetes/test/e2e_node(/.*)?$' \ -e '^k8s.io/kubernetes/test/e2e_kubeadm(/.*)?$' \ - -e '^k8s.io/.*/test/integration(/.*)?$' + -e '^k8s.io/.*/test/integration(/.*)?$' \ + -e '^k8s.io/kubernetes/openshift-hack/e2e(/.*)?$' ) } diff --git a/hack/make-rules/update.sh b/hack/make-rules/update.sh index 69684b5d7852e..aa7cf66378373 100755 --- a/hack/make-rules/update.sh +++ b/hack/make-rules/update.sh @@ -36,6 +36,8 @@ if ! ${ALL} ; then fi BASH_TARGETS=( + update-kubensenter + update-test-annotations update-codegen update-generated-api-compatibility-data update-generated-docs diff --git a/hack/make-rules/verify.sh b/hack/make-rules/verify.sh index c53cbf230e871..dd0cfbd946c7e 100755 --- a/hack/make-rules/verify.sh +++ b/hack/make-rules/verify.sh @@ -44,6 +44,23 @@ EXCLUDED_PATTERNS=( "verify-openapi-docs-urls.sh" # Spams docs URLs, don't run in CI. ) +# Excluded checks for openshift/kubernetes fork that are always skipped. +EXCLUDED_PATTERNS+=( + "verify-boilerplate.sh" # Carries do not require boilerplate + "verify-no-vendor-cycles.sh" # Incompatible with the way many carries are specified + "verify-publishing-bot.sh" # Verifies the upstream rules, which are not maintained in o/k +) + +# Skipped checks for openshift/kubernetes fork that need to be fixed. +EXCLUDED_PATTERNS+=( + "verify-openapi-spec.sh" # TODO(soltysh) Fails in CI during trap phase + "verify-golangci-lint.sh" # TODO(soltysh) Fails to build required tooling + "verify-shellcheck.sh" # TODO(soltysh) Requires either docker or local shellcheck + "verify-spelling.sh" # TODO(soltysh) Need to ensure installation of misspell command + "verify-mocks.sh" # TODO(soltysh) I don't expect us to need mocks re-generation + "verify-e2e-suites.sh" # TODO(atiratree) needs to be patched for openshift-hack dir and --list-tests option is disabled by 'UPSTREAM: : temporarily disable reporting e2e test bugs and enforce 2nd labeling to make tests work' +) + +# Exclude typecheck in certain cases, if they're running in a separate job.
if [[ ${EXCLUDE_TYPECHECK:-} =~ ^[yY]$ ]]; then EXCLUDED_PATTERNS+=( diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 281a6f2749a98..a89a1b14247f2 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -763,7 +763,7 @@ function codegen::subprojects() { CODEGEN_PKG="${codegen}" \ UPDATE_API_KNOWN_VIOLATIONS="${UPDATE_API_KNOWN_VIOLATIONS}" \ API_KNOWN_VIOLATIONS_DIR="${API_KNOWN_VIOLATIONS_DIR}" \ - ./hack/update-codegen.sh > >(indent) 2> >(indent >&2) + GOFLAGS=-mod=readonly ./hack/update-codegen.sh > >(indent) 2> >(indent >&2) popd >/dev/null done } diff --git a/hack/update-kubensenter.sh b/hack/update-kubensenter.sh new file mode 120000 index 0000000000000..1b263065ff459 --- /dev/null +++ b/hack/update-kubensenter.sh @@ -0,0 +1 @@ +../openshift-hack/update-kubensenter.sh \ No newline at end of file diff --git a/hack/update-openapi-spec.sh b/hack/update-openapi-spec.sh index 55abf904f732e..22b65f7eb943b 100755 --- a/hack/update-openapi-spec.sh +++ b/hack/update-openapi-spec.sh @@ -28,7 +28,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh" kube::util::require-jq kube::golang::setup_env -kube::etcd::install +# kube::etcd::install # We need to call `make` here because that includes all of the compile and link # flags that we use for a production build, which we need for this script. diff --git a/hack/update-test-annotations.sh b/hack/update-test-annotations.sh new file mode 120000 index 0000000000000..ecf920cd8d6b4 --- /dev/null +++ b/hack/update-test-annotations.sh @@ -0,0 +1 @@ +../openshift-hack/update-test-annotations.sh \ No newline at end of file diff --git a/hack/update-vendor.sh b/hack/update-vendor.sh index f260c92a453d6..c1bdf1b44bdae 100755 --- a/hack/update-vendor.sh +++ b/hack/update-vendor.sh @@ -308,7 +308,7 @@ hack/update-vendor-licenses.sh kube::log::status "vendor: creating OWNERS file" >&11 rm -f "vendor/OWNERS" cat <<__EOF__ > "vendor/OWNERS" -# See the OWNERS docs at https://go.k8s.io/owners +# See the OWNERS docs at https://go.k8s.io/owners options: # make root approval non-recursive @@ -326,7 +326,7 @@ for repo in $(kube::util::list_staging_repos); do echo "=== checking k8s.io/${repo}" cd "staging/src/k8s.io/${repo}" loopback_deps=() - kube::util::read-array loopback_deps < <(go list all 2>/dev/null | grep k8s.io/kubernetes/ || true) + kube::util::read-array loopback_deps < <(go list all 2>/dev/null | grep k8s.io/kubernetes/ | grep -v github.com/openshift/apiserver-library-go || true) if (( "${#loopback_deps[@]}" > 0 )); then kube::log::error "${#loopback_deps[@]} disallowed ${repo} -> k8s.io/kubernetes dependencies exist via the following imports: $(go mod why "${loopback_deps[@]}")" >&22 2>&1 exit 1 @@ -336,8 +336,9 @@ done kube::log::status "go.mod: prevent k8s.io/kubernetes --> * --> k8s.io/kubernetes dep" >&11 loopback_deps=() -kube::util::read-array loopback_deps < <(go mod graph | grep ' k8s.io/kubernetes' || true) -if (( "${#loopback_deps[@]}" > 0 )); then +kube::util::read-array loopback_deps < <(go mod graph | grep ' k8s.io/kubernetes' | grep -v github.com/openshift/apiserver-library-go || true) +# Allow apiserver-library-go to depend on k8s.io/kubernetes +if [[ -n ${loopback_deps[*]:+"${loopback_deps[*]}"} && !
"${loopback_deps[*]}" =~ github.com/openshift/apiserver-library-go ]]; then kube::log::error "${#loopback_deps[@]} disallowed transitive k8s.io/kubernetes dependencies exist via the following imports:" >&22 2>&1 kube::log::error "${loopback_deps[@]}" >&22 2>&1 exit 1 diff --git a/hack/verify-external-dependencies-version.sh b/hack/verify-external-dependencies-version.sh index 4734f199e09d3..a395239576fc3 100755 --- a/hack/verify-external-dependencies-version.sh +++ b/hack/verify-external-dependencies-version.sh @@ -31,7 +31,7 @@ export GOBIN="${KUBE_OUTPUT_BIN}" PATH="${GOBIN}:${PATH}" # Install zeitgeist -go install sigs.k8s.io/zeitgeist@v0.5.4 +go install -mod=readonly sigs.k8s.io/zeitgeist@v0.5.4 # Prefer full path for running zeitgeist ZEITGEIST_BIN="$(which zeitgeist)" diff --git a/hack/verify-govulncheck.sh b/hack/verify-govulncheck.sh index 5057f9a314233..120f2f4dcb33a 100755 --- a/hack/verify-govulncheck.sh +++ b/hack/verify-govulncheck.sh @@ -27,7 +27,7 @@ kube::util::ensure_clean_working_dir # This sets up the environment, like GOCACHE, which keeps the worktree cleaner. kube::golang::setup_env -go install golang.org/x/vuln/cmd/govulncheck@v1.1.2 +go install -mod=readonly golang.org/x/vuln/cmd/govulncheck@v1.1.2 # KUBE_VERIFY_GIT_BRANCH is populated in verify CI jobs BRANCH="${KUBE_VERIFY_GIT_BRANCH:-master}" @@ -45,7 +45,7 @@ pushd "${WORKTREE}" >/dev/null govulncheck -scan package ./... > "${KUBE_TEMP}/pr-base.txt" || true popd >/dev/null -echo -e "\n HEAD: $(cat "${KUBE_TEMP}"/head.txt)" -echo -e "\n PR_BASE: $(cat "${KUBE_TEMP}/pr-base.txt")" +echo -e "\n HEAD: $(cat "${KUBE_TEMP}"/head.txt)" +echo -e "\n PR_BASE: $(cat "${KUBE_TEMP}/pr-base.txt")" diff -s -u --ignore-all-space "${KUBE_TEMP}"/pr-base.txt "${KUBE_TEMP}"/head.txt || true diff --git a/hack/verify-kubensenter.sh b/hack/verify-kubensenter.sh new file mode 120000 index 0000000000000..01e1608f153ca --- /dev/null +++ b/hack/verify-kubensenter.sh @@ -0,0 +1 @@ +../openshift-hack/verify-kubensenter.sh \ No newline at end of file diff --git a/hack/verify-openapi-spec.sh b/hack/verify-openapi-spec.sh index a8eaf4ef3f2c6..c88b96b1d15d0 100755 --- a/hack/verify-openapi-spec.sh +++ b/hack/verify-openapi-spec.sh @@ -25,6 +25,7 @@ set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
-source "${KUBE_ROOT}/hack/lib/verify-generated.sh" +kube::golang::setup_env +# kube::etcd::install kube::verify::generated "Generated files need to be updated" "Please run 'hack/update-openapi-spec.sh'" hack/update-openapi-spec.sh "$@" diff --git a/hack/verify-spelling.sh b/hack/verify-spelling.sh index d4d8be5631a47..0dfc8e76415ed 100755 --- a/hack/verify-spelling.sh +++ b/hack/verify-spelling.sh @@ -32,7 +32,9 @@ export GOBIN="${KUBE_OUTPUT_BIN}" PATH="${GOBIN}:${PATH}" # Install tools we need -go -C "${KUBE_ROOT}/hack/tools" install github.com/client9/misspell/cmd/misspell +pushd "${KUBE_ROOT}/hack/tools" >/dev/null + go install -mod=readonly github.com/client9/misspell/cmd/misspell +popd >/dev/null # Spell checking # All the skipping files are defined in hack/.spelling_failures diff --git a/hack/verify-test-annotations.sh b/hack/verify-test-annotations.sh new file mode 120000 index 0000000000000..a9cbed2d3245a --- /dev/null +++ b/hack/verify-test-annotations.sh @@ -0,0 +1 @@ +../openshift-hack/verify-test-annotations.sh \ No newline at end of file diff --git a/hack/verify-vendor.sh b/hack/verify-vendor.sh index c68fbbbc84081..028a551da5b5e 100755 --- a/hack/verify-vendor.sh +++ b/hack/verify-vendor.sh @@ -84,8 +84,12 @@ pushd "${KUBE_ROOT}" > /dev/null 2>&1 ret=1 fi + # Given that we don't intend to publish staging repos from our fork, + # it does not seem necessary to ensure that dependencies will match + # across staging repos when published. + # # Verify we are pinned to matching levels - hack/lint-dependencies.sh >&2 + #hack/lint-dependencies.sh >&2 popd > /dev/null 2>&1 if [[ ${ret} -gt 0 ]]; then diff --git a/openshift-hack/build-go.sh b/openshift-hack/build-go.sh new file mode 100755 index 0000000000000..dfc663d23a593 --- /dev/null +++ b/openshift-hack/build-go.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +STARTTIME=$(date +%s) + +# shellcheck source=openshift-hack/lib/init.sh +source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh" + +pushd "${OS_ROOT}" > /dev/null || exit 1 + make all WHAT='cmd/kube-apiserver cmd/kube-controller-manager cmd/kube-scheduler cmd/kubelet' +popd > /dev/null || exit 1 + +os::build::version::git_vars + +if [[ "${OS_GIT_TREE_STATE:-dirty}" == "clean" ]]; then + # only when we are building from a clean state can we claim to + # have created a valid set of binaries that can resemble a release + mkdir -p "${OS_OUTPUT_RELEASEPATH}" + echo "${OS_GIT_COMMIT}" > "${OS_OUTPUT_RELEASEPATH}/.commit" +fi + +ret=$?; ENDTIME=$(date +%s); echo "$0 took $((ENDTIME - STARTTIME)) seconds"; exit "$ret" diff --git a/openshift-hack/build-rpms.sh b/openshift-hack/build-rpms.sh new file mode 100755 index 0000000000000..7fec9962e634f --- /dev/null +++ b/openshift-hack/build-rpms.sh @@ -0,0 +1,131 @@ +#!/usr/bin/env bash + +# This script generates RPMs into _output/releases. All build +# dependencies are required on the host. The build will be performed +# by the upstream makefile called from the spec file. +# shellcheck source=openshift-hack/lib/init.sh +source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh" + +# Only build linux by default. Clearing this value will build all platforms +OS_ONLY_BUILD_PLATFORMS="${OS_ONLY_BUILD_PLATFORMS:-linux/amd64}" + +function cleanup() { + return_code=$? 
+ os::util::describe_return_code "${return_code}" + exit "${return_code}" +} +trap "cleanup" EXIT + +# check whether we are in a clean output state +dirty="$( if [[ -d "${OS_OUTPUT}" ]]; then echo '1'; fi )" + +os::util::ensure::system_binary_exists rpmbuild +os::util::ensure::system_binary_exists createrepo + +if [[ -n "${OS_BUILD_SRPM-}" ]]; then + srpm="a" +else + srpm="b" +fi + +os::build::rpm::get_nvra_vars + +OS_RPM_SPECFILE="$( find "${OS_ROOT}" -name '*.spec' )" +OS_RPM_SPECQUERY="$( rpmspec -q --qf '%{name}\n' "${OS_RPM_SPECFILE}" )" +OS_RPM_NAME="$( head -1 <<< "${OS_RPM_SPECQUERY}" )" + +os::log::info "Building release RPMs for ${OS_RPM_SPECFILE} ..." + +rpm_tmp_dir="${BASETMPDIR}/rpm" + +# RPM requires the spec file be owned by the invoking user +chown "$(id -u):$(id -g)" "${OS_RPM_SPECFILE}" || true + +if [[ -n "${dirty}" && "${OS_GIT_TREE_STATE}" == "dirty" ]]; then + os::log::warning "Repository is not clean, performing fast build and reusing _output" + + # build and output from source to destination + rm -rf "${rpm_tmp_dir}" + mkdir -p "${rpm_tmp_dir}" + ln -fns "${OS_ROOT}" "${rpm_tmp_dir}/SOURCES" + ln -fns "${OS_ROOT}" "${rpm_tmp_dir}/BUILD" + rpmbuild -bb "${OS_RPM_SPECFILE}" \ + --define "_sourcedir ${rpm_tmp_dir}/SOURCES" \ + --define "_builddir ${rpm_tmp_dir}/BUILD" \ + --define "skip_prep 1" \ + --define "skip_dist ${SKIP_DIST:-1}" \ + --define "version ${OS_RPM_VERSION}" \ + --define "release ${OS_RPM_RELEASE}" \ + --define "commit ${OS_GIT_COMMIT}" \ + --define "os_git_vars ${OS_RPM_GIT_VARS}" \ + --define "_topdir ${rpm_tmp_dir}" + + mkdir -p "${OS_OUTPUT_RPMPATH}" + mv -f "${rpm_tmp_dir}"/RPMS/*/*.rpm "${OS_OUTPUT_RPMPATH}" + +else + rm -rf "${rpm_tmp_dir}/SOURCES" + mkdir -p "${rpm_tmp_dir}/SOURCES" + tar czf "${rpm_tmp_dir}/SOURCES/${OS_RPM_NAME}-${OS_RPM_VERSION}.tar.gz" \ + --owner=0 --group=0 \ + --exclude=_output --exclude=.git \ + --transform "s|^|${OS_RPM_NAME}-${OS_RPM_VERSION}/|rSH" \ + . + + rpmbuild -b${srpm} "${OS_RPM_SPECFILE}" \ + --define "skip_dist ${SKIP_DIST:-1}" \ + --define "version ${OS_RPM_VERSION}" \ + --define "release ${OS_RPM_RELEASE}" \ + --define "commit ${OS_GIT_COMMIT}" \ + --define "os_git_vars ${OS_RPM_GIT_VARS}" \ + --define "_topdir ${rpm_tmp_dir}" + + output_directory="$( find "${rpm_tmp_dir}" -type d -path "*/BUILD/${OS_RPM_NAME}-${OS_RPM_VERSION}/_output/local" )" + if [[ -z "${output_directory}" ]]; then + os::log::fatal 'No _output artifact directory found in rpmbuild artifacts!' + fi + + # migrate the rpm artifacts to the output directory, must be clean or move will fail + make clean + mkdir -p "${OS_OUTPUT}" + + # mv exits prematurely with status 1 in the following scenario: running as root, + # attempting to move a [directory tree containing a] symlink to a destination on + # an NFS volume exported with root_squash set. This can occur when running this + # script on a Vagrant box. The error shown is "mv: failed to preserve ownership + # for $FILE: Operation not permitted". As a workaround, if + # ${output_directory} and ${OS_OUTPUT} are on different devices, use cp and + # rm instead. 
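The device check that follows implements the workaround described in the comment above. A minimal Go sketch of the same decision, assuming Linux (syscall.Stat_t) and placeholder paths:

    package main

    import (
    	"fmt"
    	"os"
    	"syscall"
    )

    // sameDevice reports whether two paths live on the same filesystem device,
    // the check `stat -c %d` performs below: a plain rename only works within
    // one device; across devices the tree must be copied and removed instead.
    func sameDevice(a, b string) (bool, error) {
    	fa, err := os.Stat(a)
    	if err != nil {
    		return false, err
    	}
    	fb, err := os.Stat(b)
    	if err != nil {
    		return false, err
    	}
    	sa, aOK := fa.Sys().(*syscall.Stat_t)
    	sb, bOK := fb.Sys().(*syscall.Stat_t)
    	if !aOK || !bOK {
    		return false, fmt.Errorf("device numbers unavailable on this platform")
    	}
    	return sa.Dev == sb.Dev, nil
    }

    func main() {
    	// Placeholder paths; the script compares ${output_directory} and ${OS_OUTPUT}.
    	same, err := sameDevice("/tmp", "/")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		return
    	}
    	fmt.Println("same device:", same) // mv is safe only when true
    }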
+ if [[ $(stat -c %d "${output_directory}") == $(stat -c %d "${OS_OUTPUT}") ]]; then + mv "${output_directory}"/* "${OS_OUTPUT}" + else + cp -R "${output_directory}"/* "${OS_OUTPUT}" + rm -rf "${output_directory:?}"/* + fi + + mkdir -p "${OS_OUTPUT_RPMPATH}" + if [[ -n "${OS_BUILD_SRPM-}" ]]; then + mv -f "${rpm_tmp_dir}"/SRPMS/*src.rpm "${OS_OUTPUT_RPMPATH}" + fi + mv -f "${rpm_tmp_dir}"/RPMS/*/*.rpm "${OS_OUTPUT_RPMPATH}" +fi + +mkdir -p "${OS_OUTPUT_RELEASEPATH}" +echo "${OS_GIT_COMMIT}" > "${OS_OUTPUT_RELEASEPATH}/.commit" + +repo_path="$( os::util::absolute_path "${OS_OUTPUT_RPMPATH}" )" +createrepo "${repo_path}" + +echo "[${OS_RPM_NAME}-local-release] +baseurl = file://${repo_path} +gpgcheck = 0 +name = Release from Local Source for ${OS_RPM_NAME} +enabled = 1 +" > "${repo_path}/local-release.repo" + +# DEPRECATED: preserve until jobs migrate to using local-release.repo +cp "${repo_path}/local-release.repo" "${repo_path}/origin-local-release.repo" + +os::log::info "Repository file for \`yum\` or \`dnf\` placed at ${repo_path}/local-release.repo +Install it with: +$ mv '${repo_path}/local-release.repo' '/etc/yum.repos.d'" diff --git a/openshift-hack/cmd/go-imports-diff/main.go b/openshift-hack/cmd/go-imports-diff/main.go new file mode 100644 index 0000000000000..6d7ec96f55cff --- /dev/null +++ b/openshift-hack/cmd/go-imports-diff/main.go @@ -0,0 +1,74 @@ +package main + +import ( + "flag" + "fmt" + "go/parser" + "go/token" + "os" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +const testPackagePrefix = "k8s.io/kubernetes/test/e2e" + +func main() { + // Parse flags + excludeList := flag.String("exclude", "", "Comma-separated list of imports to be ignored") + flag.Parse() + + // Parse positional arguments + args := flag.Args() + if len(args) != 2 { + fmt.Fprintf(os.Stderr, "Usage: %s [flags] <base-file> <compare-file>\n", os.Args[0]) + flag.PrintDefaults() + os.Exit(2) + } + baseFile := args[0] + compareFile := args[1] + + // Parse the base file + baseNode, err := parser.ParseFile(token.NewFileSet(), baseFile, nil, parser.AllErrors) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to parse go file %s: %v\n", baseFile, err) + os.Exit(1) + } + + // Create a set containing imports of the base file + baseImports := sets.New[string]() + for _, imp := range baseNode.Imports { + v := strings.Trim(imp.Path.Value, `"`) + if !strings.Contains(v, testPackagePrefix) { + continue + } + baseImports.Insert(v) + } + + // Parse the file that is compared with the base one + compareNode, err := parser.ParseFile(token.NewFileSet(), compareFile, nil, parser.AllErrors) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to parse go file %s: %v\n", compareFile, err) + os.Exit(1) + } + + // Create a set containing imports of the compare file + compareImports := sets.New[string]() + for _, imp := range compareNode.Imports { + v := strings.Trim(imp.Path.Value, `"`) + if !strings.Contains(v, testPackagePrefix) { + continue + } + compareImports.Insert(v) + } + + // Compare imports of both files + exclude := strings.Split(*excludeList, ",") + diff := baseImports.Difference(compareImports).Delete(exclude...).UnsortedList() + if len(diff) > 0 { + sort.Strings(diff) + fmt.Fprintf(os.Stderr, "Imports from %q not in %q:\n\n%s\n", baseFile, compareFile, strings.Join(diff, "\n")) + os.Exit(1) + } +} diff --git a/openshift-hack/cmd/k8s-tests-ext/k8s-tests.go b/openshift-hack/cmd/k8s-tests-ext/k8s-tests.go new file mode 100644 index 0000000000000..5314527b92f09 --- /dev/null +++ b/openshift-hack/cmd/k8s-tests-ext/k8s-tests.go @@ -0,0 +1,105
@@ +package main + +import ( + "os" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/openshift-eng/openshift-tests-extension/pkg/cmd" + e "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" + g "github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo" + v "github.com/openshift-eng/openshift-tests-extension/pkg/version" + + "k8s.io/client-go/pkg/version" + utilflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/logs" + "k8s.io/kubernetes/openshift-hack/e2e/annotate/generated" + + // initialize framework extensions + _ "k8s.io/kubernetes/test/e2e/framework/debug/init" + _ "k8s.io/kubernetes/test/e2e/framework/metrics/init" +) + +func main() { + logs.InitLogs() + defer logs.FlushLogs() + pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) + + // Get version info from kube + kubeVersion := version.Get() + v.GitTreeState = kubeVersion.GitTreeState + v.BuildDate = kubeVersion.BuildDate + v.CommitFromGit = kubeVersion.GitCommit + + // Create our registry of openshift-tests extensions + extensionRegistry := e.NewRegistry() + kubeTestsExtension := e.NewExtension("openshift", "payload", "hyperkube") + extensionRegistry.Register(kubeTestsExtension) + + // Carve up the kube tests into our openshift suites... + kubeTestsExtension.AddSuite(e.Suite{ + Name: "kubernetes/conformance/parallel", + Parents: []string{ + "openshift/conformance/parallel", + "openshift/conformance/parallel/minimal", + }, + Qualifiers: []string{`!labels.exists(l, l == "Serial") && labels.exists(l, l == "Conformance")`}, + }) + + kubeTestsExtension.AddSuite(e.Suite{ + Name: "kubernetes/conformance/serial", + Parents: []string{ + "openshift/conformance/serial", + "openshift/conformance/serial/minimal", + }, + Qualifiers: []string{`labels.exists(l, l == "Serial") && labels.exists(l, l == "Conformance")`}, + }) + + //FIXME(stbenjam): what other suites does k8s-test contribute to? + + // Build our specs from ginkgo + specs, err := g.BuildExtensionTestSpecsFromOpenShiftGinkgoSuite() + if err != nil { + panic(err) + } + + // Initialization for kube ginkgo test framework needs to run before all tests execute + specs.AddBeforeAll(func() { + if err := initializeTestFramework(os.Getenv("TEST_PROVIDER")); err != nil { + panic(err) + } + }) + + // Annotations get appended to test names, these are additions to upstream + // tests for controlling skips, suite membership, etc. + // + // TODO: + // - Remove this annotation code, and migrate to Labels/Tags and + // the environmental skip code from the enhancement once its implemented. 
+ // - Make sure to account for test renames that occur because of removal of these + // annotations + specs.Walk(func(spec *extensiontests.ExtensionTestSpec) { + if annotations, ok := generated.Annotations[spec.Name]; ok { + spec.Name += annotations + } + }) + + kubeTestsExtension.AddSpecs(specs) + + // Cobra stuff + root := &cobra.Command{ + Long: "Kubernetes tests extension for OpenShift", + } + + root.AddCommand( + cmd.DefaultExtensionCommands(extensionRegistry)..., + ) + + if err := func() error { + return root.Execute() + }(); err != nil { + os.Exit(1) + } +} diff --git a/openshift-hack/cmd/k8s-tests-ext/provider.go b/openshift-hack/cmd/k8s-tests-ext/provider.go new file mode 100644 index 0000000000000..cdc948a45c652 --- /dev/null +++ b/openshift-hack/cmd/k8s-tests-ext/provider.go @@ -0,0 +1,147 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + kclientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubernetes/openshift-hack/e2e" + conformancetestdata "k8s.io/kubernetes/test/conformance/testdata" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/testfiles" + "k8s.io/kubernetes/test/e2e/storage/external" + e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests" + testfixtures "k8s.io/kubernetes/test/fixtures" + + // this appears to inexplicably auto-register global flags. + _ "k8s.io/kubernetes/test/e2e/storage/drivers" + + // these are loading important global flags that we need to get and set + _ "k8s.io/kubernetes/test/e2e" + _ "k8s.io/kubernetes/test/e2e/lifecycle" +) + +// copied directly from github.com/openshift/origin/cmd/openshift-tests/provider.go +// and github.com/openshift/origin/test/extended/util/test.go +func initializeTestFramework(provider string) error { + providerInfo := &ClusterConfiguration{} + if err := json.Unmarshal([]byte(provider), &providerInfo); err != nil { + return fmt.Errorf("provider must be a JSON object with the 'type' key at a minimum: %v", err) + } + if len(providerInfo.ProviderName) == 0 { + return fmt.Errorf("provider must be a JSON object with the 'type' key") + } + config := &ClusterConfiguration{} + if err := json.Unmarshal([]byte(provider), config); err != nil { + return fmt.Errorf("provider must decode into the ClusterConfig object: %v", err) + } + + // update testContext with loaded config + testContext := &framework.TestContext + testContext.Provider = config.ProviderName + testContext.CloudConfig = framework.CloudConfig{ + ProjectID: config.ProjectID, + Region: config.Region, + Zone: config.Zone, + Zones: config.Zones, + NumNodes: config.NumNodes, + MultiMaster: config.MultiMaster, + MultiZone: config.MultiZone, + ConfigFile: config.ConfigFile, + } + testContext.AllowedNotReadyNodes = -1 + testContext.MinStartupPods = -1 + testContext.MaxNodesToGather = 0 + testContext.KubeConfig = os.Getenv("KUBECONFIG") + + // allow the CSI tests to access test data, but only briefly + // TODO: ideally CSI would not use any of these test methods + // var err error + // exutil.WithCleanup(func() { err = initCSITests(dryRun) }) + // TODO: for now I'm only initializing CSI directly, but we probably need that + // WithCleanup here as well + if err := initCSITests(); err != nil { + return err + } + + if ad := os.Getenv("ARTIFACT_DIR"); len(strings.TrimSpace(ad)) == 0 { + os.Setenv("ARTIFACT_DIR", filepath.Join(os.TempDir(), "artifacts")) + } 
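The decode above treats TEST_PROVIDER as a JSON object whose required "type" key populates ClusterConfiguration.ProviderName. A cut-down, self-contained sketch of that contract, with a hypothetical provider value:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // A trimmed clusterConfig: only the "type" key is required, and it decodes
    // into ProviderName exactly as in the full ClusterConfiguration struct.
    type clusterConfig struct {
    	ProviderName string `json:"type"`
    	Region       string
    	Zone         string
    	HasIPv4      bool
    	HasIPv6      bool
    }

    func main() {
    	// Hypothetical TEST_PROVIDER value for an AWS cluster.
    	provider := `{"type":"aws","region":"us-east-1","zone":"us-east-1a","hasIPv4":true}`

    	var cfg clusterConfig
    	if err := json.Unmarshal([]byte(provider), &cfg); err != nil || cfg.ProviderName == "" {
    		fmt.Println("provider must be a JSON object with the 'type' key at a minimum")
    		return
    	}
    	fmt.Printf("provider=%s region=%s zone=%s\n", cfg.ProviderName, cfg.Region, cfg.Zone)
    }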
+ + testContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false" + testContext.VerifyServiceAccount = true + testfiles.AddFileSource(e2etestingmanifests.GetE2ETestingManifestsFS()) + testfiles.AddFileSource(testfixtures.GetTestFixturesFS()) + testfiles.AddFileSource(conformancetestdata.GetConformanceTestdataFS()) + testContext.KubectlPath = "kubectl" + // context.KubeConfig = KubeConfigPath() + testContext.KubeConfig = os.Getenv("KUBECONFIG") + + // "debian" is used when not set. At least GlusterFS tests need "custom". + // (There is no option for "rhel" or "centos".) + testContext.NodeOSDistro = "custom" + testContext.MasterOSDistro = "custom" + + // load and set the host variable for kubectl + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: testContext.KubeConfig}, &clientcmd.ConfigOverrides{}) + cfg, err := clientConfig.ClientConfig() + if err != nil { + return err + } + testContext.Host = cfg.Host + + // Ensure that Kube tests run privileged (like they do upstream) + testContext.CreateTestingNS = func(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string) (*corev1.Namespace, error) { + return e2e.CreateTestingNS(ctx, baseName, c, labels, true) + } + + gomega.RegisterFailHandler(ginkgo.Fail) + + framework.AfterReadingAllFlags(testContext) + testContext.DumpLogsOnFailure = true + + // these constants are taken from kube e2e and used by tests + testContext.IPFamily = "ipv4" + if config.HasIPv6 && !config.HasIPv4 { + testContext.IPFamily = "ipv6" + } + + testContext.ReportDir = os.Getenv("TEST_JUNIT_DIR") + + return nil +} + +const ( + manifestEnvVar = "TEST_CSI_DRIVER_FILES" +) + +// copied directly from github.com/openshift/origin/cmd/openshift-tests/csi.go +// Initialize openshift/csi suite, i.e. define CSI tests from TEST_CSI_DRIVER_FILES. +func initCSITests() error { + manifestList := os.Getenv(manifestEnvVar) + if manifestList != "" { + manifests := strings.Split(manifestList, ",") + for _, manifest := range manifests { + if err := external.AddDriverDefinition(manifest); err != nil { + return fmt.Errorf("failed to load manifest from %q: %s", manifest, err) + } + // Register the base dir of the manifest file as a file source. + // With this we can reference the CSI driver's storageClass + // in the manifest file (FromFile field). + testfiles.AddFileSource(testfiles.RootFileSource{ + Root: filepath.Dir(manifest), + }) + } + } + + return nil +} diff --git a/openshift-hack/cmd/k8s-tests-ext/types.go b/openshift-hack/cmd/k8s-tests-ext/types.go new file mode 100644 index 0000000000000..b43652499537d --- /dev/null +++ b/openshift-hack/cmd/k8s-tests-ext/types.go @@ -0,0 +1,47 @@ +package main + +// copied directly from github.com/openshift/origin/test/extended/util/cluster/cluster.go +type ClusterConfiguration struct { + ProviderName string `json:"type"` + + // These fields (and the "type" tag for ProviderName) chosen to match + // upstream's e2e.CloudConfig. + ProjectID string + Region string + Zone string + NumNodes int + MultiMaster bool + MultiZone bool + Zones []string + ConfigFile string + + // Disconnected is set for test jobs without external internet connectivity + Disconnected bool + + // SingleReplicaTopology is set for disabling disruptive tests or tests + // that require high availability + SingleReplicaTopology bool + + // NetworkPlugin is the "official" plugin name + NetworkPlugin string + // NetworkPluginMode is an optional sub-identifier for the NetworkPlugin. 
+ // (Currently it is only used for OpenShiftSDN.) + NetworkPluginMode string `json:",omitempty"` + + // HasIPv4 and HasIPv6 determine whether IPv4-specific, IPv6-specific, + // and dual-stack-specific tests are run + HasIPv4 bool + HasIPv6 bool + + // HasSCTP determines whether SCTP connectivity tests can be run in the cluster + HasSCTP bool + + // IsProxied determines whether we are accessing the cluster through an HTTP proxy + IsProxied bool + + // IsIBMROKS determines whether the cluster is Managed IBM Cloud (ROKS) + IsIBMROKS bool + + // IsNoOptionalCapabilities indicates the cluster has no optional capabilities enabled + HasNoOptionalCapabilities bool +} diff --git a/openshift-hack/cmd/k8s-tests/k8s-tests.go b/openshift-hack/cmd/k8s-tests/k8s-tests.go new file mode 100644 index 0000000000000..fedd8b16f0141 --- /dev/null +++ b/openshift-hack/cmd/k8s-tests/k8s-tests.go @@ -0,0 +1,98 @@ +package main + +import ( + "encoding/json" + "flag" + "fmt" + "math/rand" + "os" + "sort" + "time" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + utilflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/logs" + "k8s.io/kubernetes/test/e2e/framework" + + // initialize framework extensions + _ "k8s.io/kubernetes/test/e2e/framework/debug/init" + _ "k8s.io/kubernetes/test/e2e/framework/metrics/init" +) + +func main() { + logs.InitLogs() + defer logs.FlushLogs() + + rand.Seed(time.Now().UTC().UnixNano()) + + pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) + + root := &cobra.Command{ + Long: "OpenShift Tests compatible wrapper", + } + + root.AddCommand( + newRunTestCommand(), + newListTestsCommand(), + ) + + f := flag.CommandLine.Lookup("v") + root.PersistentFlags().AddGoFlag(f) + pflag.CommandLine = pflag.NewFlagSet("empty", pflag.ExitOnError) + flag.CommandLine = flag.NewFlagSet("empty", flag.ExitOnError) + framework.RegisterCommonFlags(flag.CommandLine) + framework.RegisterClusterFlags(flag.CommandLine) + + if err := func() error { + return root.Execute() + }(); err != nil { + if ex, ok := err.(ExitError); ok { + fmt.Fprintf(os.Stderr, "Ginkgo exit error %d: %v\n", ex.Code, err) + os.Exit(ex.Code) + } + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } +} + +func newRunTestCommand() *cobra.Command { + testOpt := NewTestOptions(os.Stdout, os.Stderr) + + cmd := &cobra.Command{ + Use: "run-test NAME", + Short: "Run a single test by name", + Long: "Execute a single test.", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + if err := initializeTestFramework(os.Getenv("TEST_PROVIDER")); err != nil { + return err + } + + return testOpt.Run(args) + }, + } + return cmd +} + +func newListTestsCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "List available tests", + Long: "List the available tests in this binary.", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + tests := testsForSuite() + sort.Slice(tests, func(i, j int) bool { return tests[i].Name < tests[j].Name }) + data, err := json.Marshal(tests) + if err != nil { + return err + } + fmt.Fprintf(os.Stdout, "%s\n", data) + return nil + }, + } + + return cmd +} diff --git a/openshift-hack/cmd/k8s-tests/provider.go b/openshift-hack/cmd/k8s-tests/provider.go new file mode 100644 index 0000000000000..cdc948a45c652 --- /dev/null +++ b/openshift-hack/cmd/k8s-tests/provider.go @@ -0,0 +1,147 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/onsi/ginkgo/v2" + 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + kclientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubernetes/openshift-hack/e2e" + conformancetestdata "k8s.io/kubernetes/test/conformance/testdata" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/testfiles" + "k8s.io/kubernetes/test/e2e/storage/external" + e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests" + testfixtures "k8s.io/kubernetes/test/fixtures" + + // this appears to inexplicably auto-register global flags. + _ "k8s.io/kubernetes/test/e2e/storage/drivers" + + // these are loading important global flags that we need to get and set + _ "k8s.io/kubernetes/test/e2e" + _ "k8s.io/kubernetes/test/e2e/lifecycle" +) + +// copied directly from github.com/openshift/origin/cmd/openshift-tests/provider.go +// and github.com/openshift/origin/test/extended/util/test.go +func initializeTestFramework(provider string) error { + providerInfo := &ClusterConfiguration{} + if err := json.Unmarshal([]byte(provider), &providerInfo); err != nil { + return fmt.Errorf("provider must be a JSON object with the 'type' key at a minimum: %v", err) + } + if len(providerInfo.ProviderName) == 0 { + return fmt.Errorf("provider must be a JSON object with the 'type' key") + } + config := &ClusterConfiguration{} + if err := json.Unmarshal([]byte(provider), config); err != nil { + return fmt.Errorf("provider must decode into the ClusterConfig object: %v", err) + } + + // update testContext with loaded config + testContext := &framework.TestContext + testContext.Provider = config.ProviderName + testContext.CloudConfig = framework.CloudConfig{ + ProjectID: config.ProjectID, + Region: config.Region, + Zone: config.Zone, + Zones: config.Zones, + NumNodes: config.NumNodes, + MultiMaster: config.MultiMaster, + MultiZone: config.MultiZone, + ConfigFile: config.ConfigFile, + } + testContext.AllowedNotReadyNodes = -1 + testContext.MinStartupPods = -1 + testContext.MaxNodesToGather = 0 + testContext.KubeConfig = os.Getenv("KUBECONFIG") + + // allow the CSI tests to access test data, but only briefly + // TODO: ideally CSI would not use any of these test methods + // var err error + // exutil.WithCleanup(func() { err = initCSITests(dryRun) }) + // TODO: for now I'm only initializing CSI directly, but we probably need that + // WithCleanup here as well + if err := initCSITests(); err != nil { + return err + } + + if ad := os.Getenv("ARTIFACT_DIR"); len(strings.TrimSpace(ad)) == 0 { + os.Setenv("ARTIFACT_DIR", filepath.Join(os.TempDir(), "artifacts")) + } + + testContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false" + testContext.VerifyServiceAccount = true + testfiles.AddFileSource(e2etestingmanifests.GetE2ETestingManifestsFS()) + testfiles.AddFileSource(testfixtures.GetTestFixturesFS()) + testfiles.AddFileSource(conformancetestdata.GetConformanceTestdataFS()) + testContext.KubectlPath = "kubectl" + // context.KubeConfig = KubeConfigPath() + testContext.KubeConfig = os.Getenv("KUBECONFIG") + + // "debian" is used when not set. At least GlusterFS tests need "custom". + // (There is no option for "rhel" or "centos".) 
+ testContext.NodeOSDistro = "custom" + testContext.MasterOSDistro = "custom" + + // load and set the host variable for kubectl + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: testContext.KubeConfig}, &clientcmd.ConfigOverrides{}) + cfg, err := clientConfig.ClientConfig() + if err != nil { + return err + } + testContext.Host = cfg.Host + + // Ensure that Kube tests run privileged (like they do upstream) + testContext.CreateTestingNS = func(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string) (*corev1.Namespace, error) { + return e2e.CreateTestingNS(ctx, baseName, c, labels, true) + } + + gomega.RegisterFailHandler(ginkgo.Fail) + + framework.AfterReadingAllFlags(testContext) + testContext.DumpLogsOnFailure = true + + // these constants are taken from kube e2e and used by tests + testContext.IPFamily = "ipv4" + if config.HasIPv6 && !config.HasIPv4 { + testContext.IPFamily = "ipv6" + } + + testContext.ReportDir = os.Getenv("TEST_JUNIT_DIR") + + return nil +} + +const ( + manifestEnvVar = "TEST_CSI_DRIVER_FILES" +) + +// copied directly from github.com/openshift/origin/cmd/openshift-tests/csi.go +// Initialize openshift/csi suite, i.e. define CSI tests from TEST_CSI_DRIVER_FILES. +func initCSITests() error { + manifestList := os.Getenv(manifestEnvVar) + if manifestList != "" { + manifests := strings.Split(manifestList, ",") + for _, manifest := range manifests { + if err := external.AddDriverDefinition(manifest); err != nil { + return fmt.Errorf("failed to load manifest from %q: %s", manifest, err) + } + // Register the base dir of the manifest file as a file source. + // With this we can reference the CSI driver's storageClass + // in the manifest file (FromFile field). + testfiles.AddFileSource(testfiles.RootFileSource{ + Root: filepath.Dir(manifest), + }) + } + } + + return nil +} diff --git a/openshift-hack/cmd/k8s-tests/runtest.go b/openshift-hack/cmd/k8s-tests/runtest.go new file mode 100644 index 0000000000000..0abff33438fc3 --- /dev/null +++ b/openshift-hack/cmd/k8s-tests/runtest.go @@ -0,0 +1,143 @@ +package main + +import ( + "fmt" + "io" + "os" + "regexp" + "strings" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" + + "k8s.io/kubernetes/openshift-hack/e2e/annotate/generated" + + // ensure all the ginkgo tests are loaded + _ "k8s.io/kubernetes/openshift-hack/e2e" +) + +// TestOptions handles running a single test. +type TestOptions struct { + Out io.Writer + ErrOut io.Writer +} + +var _ ginkgo.GinkgoTestingT = &TestOptions{} + +func NewTestOptions(out io.Writer, errOut io.Writer) *TestOptions { + return &TestOptions{ + Out: out, + ErrOut: errOut, + } +} + +func (opt *TestOptions) Run(args []string) error { + if len(args) != 1 { + return fmt.Errorf("only a single test name may be passed") + } + + // Ignore the upstream suite behavior within test execution + ginkgo.GetSuite().ClearBeforeAndAfterSuiteNodes() + tests := testsForSuite() + var test *TestCase + for _, t := range tests { + if t.Name == args[0] { + test = t + break + } + } + if test == nil { + return fmt.Errorf("no test exists with that name: %s", args[0]) + } + + suiteConfig, reporterConfig := ginkgo.GinkgoConfiguration() + suiteConfig.FocusStrings = []string{fmt.Sprintf("^ %s$", regexp.QuoteMeta(test.Name))} + + // These settings are matched to upstream's ginkgo configuration. 
See: + // https://github.com/kubernetes/kubernetes/blob/v1.25.0/test/e2e/framework/test_context.go#L354-L355 + // Randomize specs as well as suites + suiteConfig.RandomizeAllSpecs = true + // https://github.com/kubernetes/kubernetes/blob/v1.25.0/hack/ginkgo-e2e.sh#L172-L173 + suiteConfig.Timeout = 24 * time.Hour + reporterConfig.NoColor = true + reporterConfig.Verbose = true + + ginkgo.SetReporterConfig(reporterConfig) + + cwd, err := os.Getwd() + if err != nil { + return err + } + ginkgo.GetSuite().RunSpec(test.spec, ginkgo.Labels{}, "Kubernetes e2e suite", cwd, ginkgo.GetFailer(), ginkgo.GetWriter(), suiteConfig, reporterConfig) + + var summary types.SpecReport + for _, report := range ginkgo.GetSuite().GetReport().SpecReports { + if report.NumAttempts > 0 { + summary = report + } + } + + switch { + case summary.State == types.SpecStatePassed: + // do nothing + case summary.State == types.SpecStateSkipped: + if len(summary.Failure.Message) > 0 { + fmt.Fprintf(opt.ErrOut, "skip [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message) + } + if len(summary.Failure.ForwardedPanic) > 0 { + fmt.Fprintf(opt.ErrOut, "skip [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic) + } + return ExitError{Code: 3} + case summary.State == types.SpecStateFailed, summary.State == types.SpecStatePanicked, summary.State == types.SpecStateInterrupted: + if len(summary.Failure.ForwardedPanic) > 0 { + if len(summary.Failure.Location.FullStackTrace) > 0 { + fmt.Fprintf(opt.ErrOut, "\n%s\n", summary.Failure.Location.FullStackTrace) + } + fmt.Fprintf(opt.ErrOut, "fail [%s:%d]: Test Panicked: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic) + return ExitError{Code: 1} + } + fmt.Fprintf(opt.ErrOut, "fail [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message) + return ExitError{Code: 1} + default: + return fmt.Errorf("unrecognized test case outcome: %#v", summary) + } + return nil +} + +func (opt *TestOptions) Fail() { + // this function allows us to pass TestOptions as the first argument, + // it's empty because we have a failure check mechanism implemented above.
+} + +func lastFilenameSegment(filename string) string { + if parts := strings.Split(filename, "/vendor/"); len(parts) > 1 { + return parts[len(parts)-1] + } + if parts := strings.Split(filename, "/src/"); len(parts) > 1 { + return parts[len(parts)-1] + } + return filename +} + +func testsForSuite() []*TestCase { + var tests []*TestCase + + // Don't build the tree multiple times, it results in multiple initing of tests + if !ginkgo.GetSuite().InPhaseBuildTree() { + ginkgo.GetSuite().BuildTree() + } + + ginkgo.GetSuite().WalkTests(func(name string, spec types.TestSpec) { + testCase := &TestCase{ + Name: spec.Text(), + locations: spec.CodeLocations(), + spec: spec, + } + if labels, ok := generated.Annotations[name]; ok { + testCase.Labels = labels + } + tests = append(tests, testCase) + }) + return tests +} diff --git a/openshift-hack/cmd/k8s-tests/types.go b/openshift-hack/cmd/k8s-tests/types.go new file mode 100644 index 0000000000000..29a0b5b5efa4e --- /dev/null +++ b/openshift-hack/cmd/k8s-tests/types.go @@ -0,0 +1,69 @@ +package main + +import ( + "fmt" + + "github.com/onsi/ginkgo/v2/types" +) + +// copied directly from github.com/openshift/origin/test/extended/util/cluster/cluster.go +type ClusterConfiguration struct { + ProviderName string `json:"type"` + + // These fields (and the "type" tag for ProviderName) chosen to match + // upstream's e2e.CloudConfig. + ProjectID string + Region string + Zone string + NumNodes int + MultiMaster bool + MultiZone bool + Zones []string + ConfigFile string + + // Disconnected is set for test jobs without external internet connectivity + Disconnected bool + + // SingleReplicaTopology is set for disabling disruptive tests or tests + // that require high availability + SingleReplicaTopology bool + + // NetworkPlugin is the "official" plugin name + NetworkPlugin string + // NetworkPluginMode is an optional sub-identifier for the NetworkPlugin. + // (Currently it is only used for OpenShiftSDN.) 
+ NetworkPluginMode string `json:",omitempty"` + + // HasIPv4 and HasIPv6 determine whether IPv4-specific, IPv6-specific, + // and dual-stack-specific tests are run + HasIPv4 bool + HasIPv6 bool + + // HasSCTP determines whether SCTP connectivity tests can be run in the cluster + HasSCTP bool + + // IsProxied determines whether we are accessing the cluster through an HTTP proxy + IsProxied bool + + // IsIBMROKS determines whether the cluster is Managed IBM Cloud (ROKS) + IsIBMROKS bool + + // IsNoOptionalCapabilities indicates the cluster has no optional capabilities enabled + HasNoOptionalCapabilities bool +} + +// copied directly from github.com/openshift/origin/pkg/test/ginkgo/test.go +type TestCase struct { + Name string + Labels string + spec types.TestSpec + locations []types.CodeLocation +} + +type ExitError struct { + Code int +} + +func (e ExitError) Error() string { + return fmt.Sprintf("exit with code %d", e.Code) +} diff --git a/openshift-hack/conformance-k8s.sh b/openshift-hack/conformance-k8s.sh new file mode 100755 index 0000000000000..a2680b7f7f270 --- /dev/null +++ b/openshift-hack/conformance-k8s.sh @@ -0,0 +1,96 @@ +#!/bin/bash +# +# Runs the Kubernetes conformance suite against an OpenShift cluster +# +# Test prerequisites: +# +# * all nodes that users can run workloads on are marked as schedulable +# +source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh" + +# Check inputs +if [[ -z "${KUBECONFIG-}" ]]; then + os::log::fatal "KUBECONFIG must be set to a root account" +fi +test_report_dir="${ARTIFACT_DIR}" +mkdir -p "${test_report_dir}" + +cat <<END > "${test_report_dir}/README.md" +This conformance report is generated by the OpenShift CI infrastructure. The canonical source location for this test script is located at https://github.com/openshift/kubernetes/blob/master/openshift-hack/conformance-k8s.sh + +This file was generated by: + + Commit $( git rev-parse HEAD || "" ) + Tag $( git describe || "" ) + +To recreate these results: + +1. Install an [OpenShift cluster](https://docs.openshift.com/container-platform/) +2. Retrieve a \`.kubeconfig\` file with administrator credentials on that cluster and set the environment variable KUBECONFIG + + export KUBECONFIG=PATH_TO_KUBECONFIG + +3. Clone the OpenShift source repository and change to that directory: + + git clone https://github.com/openshift/kubernetes.git + cd kubernetes + +4. Place the \`oc\` binary for that cluster in your PATH +5.
Run the conformance test: + + openshift-hack/conformance-k8s.sh + +Nightly conformance tests are run against release branches and reported https://openshift-gce-devel.appspot.com/builds/origin-ci-test/logs/periodic-ci-origin-conformance-k8s/ +END + +version="$(sed -rn 's/.*io.openshift.build.versions="kubernetes=(1.[0-9]+.[0-9]+(-rc.[0-9])?)"/v\1/p' openshift-hack/images/hyperkube/Dockerfile.rhel)" +os::log::info "Running Kubernetes conformance suite for ${version}" + +# Execute OpenShift prerequisites +# Disable container security +oc adm policy add-scc-to-group privileged system:authenticated system:serviceaccounts +oc adm policy add-scc-to-group anyuid system:authenticated system:serviceaccounts +unschedulable="$( ( oc get nodes -o name -l 'node-role.kubernetes.io/master'; ) | wc -l )" +# TODO: undo these operations + +# Execute Kubernetes prerequisites +make WHAT=cmd/kubectl +make WHAT=test/e2e/e2e.test +make WHAT=vendor/github.com/onsi/ginkgo/v2/ginkgo +PATH="${OS_ROOT}/_output/local/bin/$( os::build::host_platform ):${PATH}" +export PATH + +kubectl version > "${test_report_dir}/version.txt" +echo "-----" >> "${test_report_dir}/version.txt" +oc version >> "${test_report_dir}/version.txt" + +# Run the test, serial tests first, then parallel + +rc=0 + +e2e_test="$( which e2e.test )" + +# shellcheck disable=SC2086 +ginkgo \ + -nodes 1 -no-color '-focus=(\[Conformance\].*\[Serial\]|\[Serial\].*\[Conformance\])' \ + ${e2e_test} -- \ + -report-dir "${test_report_dir}" \ + -allowed-not-ready-nodes ${unschedulable} \ + 2>&1 | tee -a "${test_report_dir}/e2e.log" || rc=1 + +rename -v junit_ junit_serial_ "${test_report_dir}"/junit*.xml + +# shellcheck disable=SC2086 +ginkgo \ + --timeout="24h" \ + --output-interceptor-mode=none \ + -nodes 4 -no-color '-skip=\[Serial\]' '-focus=\[Conformance\]' \ + ${e2e_test} -- \ + -report-dir "${test_report_dir}" \ + -allowed-not-ready-nodes ${unschedulable} \ + 2>&1 | tee -a "${test_report_dir}/e2e.log" || rc=1 + +echo +echo "Run complete, results in ${test_report_dir}" + +exit $rc diff --git a/openshift-hack/create-or-update-rebase-branch.sh b/openshift-hack/create-or-update-rebase-branch.sh new file mode 100755 index 0000000000000..c948eb874850a --- /dev/null +++ b/openshift-hack/create-or-update-rebase-branch.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +set -o nounset +set -o errexit +set -o pipefail + +# This script is intended to simplify maintaining a rebase branch for +# openshift/kubernetes. +# +# - If the branch named by REBASE_BRANCH does not exist, it will be created by +# branching from UPSTREAM_TAG and merging in TARGET_BRANCH with strategy +# 'ours'. +# +# - If the branch named by REBASE_BRANCH exists, it will be renamed to +# -, a new branch will be created as per above, and +# carries from the renamed branch will be cherry-picked.
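The cherry-pick step described above keys off the most recent merge commit on the previous rebase branch. A small Go sketch of that lookup, shelling out to git the same way the script does (the branch name is hypothetical):

    package main

    import (
    	"fmt"
    	"os"
    	"os/exec"
    	"strings"
    )

    // lastMergeCommit returns the most recent merge commit on branch, the same
    // query the script issues with `git log --pretty=%H --merges --max-count=1`.
    func lastMergeCommit(branch string) (string, error) {
    	out, err := exec.Command("git", "log", "--pretty=%H", "--merges", "--max-count=1", branch).Output()
    	if err != nil {
    		return "", err
    	}
    	return strings.TrimSpace(string(out)), nil
    }

    func main() {
    	// Hypothetical renamed rebase branch produced by the script.
    	prev := "rebase-1.31.2024-09-01_12-00-00"
    	sha, err := lastMergeCommit(prev)
    	if err != nil {
    		fmt.Fprintln(os.Stderr, "git log failed:", err)
    		return
    	}
    	// Everything after the merge point is a carry to cherry-pick.
    	fmt.Printf("git cherry-pick %s..%s\n", sha, prev)
    }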
+ +UPSTREAM_TAG="${UPSTREAM_TAG:-}" +if [[ -z "${UPSTREAM_TAG}" ]]; then + echo >&2 "UPSTREAM_TAG is required" + exit 1 +fi + +REBASE_BRANCH="${REBASE_BRANCH:-}" +if [[ -z "${REBASE_BRANCH}" ]]; then + echo >&2 "REBASE_BRANCH is required" + exit 1 +fi + +TARGET_BRANCH="${TARGET_BRANCH:-master}" +if [[ -z "${TARGET_BRANCH}" ]]; then + echo >&2 "TARGET_BRANCH is required" + exit 1 +fi + +echo "Ensuring target branch '${TARGET_BRANCH}' is updated" +git checkout "${TARGET_BRANCH}" +git pull + +echo "Checking if '${REBASE_BRANCH}' exists" +REBASE_IN_PROGRESS= +if git show-ref --verify --quiet "refs/heads/${REBASE_BRANCH}"; then + REBASE_IN_PROGRESS=y +fi + +# If a rebase is in progress, rename the existing branch +if [[ "${REBASE_IN_PROGRESS}" ]]; then + TIMESTAMP="$(date +"%Y-%m-%d_%H-%M-%S")" + PREVIOUS_REBASE_BRANCH="${REBASE_BRANCH}.${TIMESTAMP}" + echo "Renaming rebase branch '${REBASE_BRANCH}' to '${PREVIOUS_REBASE_BRANCH}'" + git branch -m "${REBASE_BRANCH}" "${PREVIOUS_REBASE_BRANCH}" +fi + +echo "Branching upstream tag '${UPSTREAM_TAG}' to rebase branch '${REBASE_BRANCH}'" +git checkout -b "${REBASE_BRANCH}" "${UPSTREAM_TAG}" + +echo "Merging target branch '${TARGET_BRANCH}' to rebase branch '${REBASE_BRANCH}'" +git merge -s ours --no-edit "${TARGET_BRANCH}" + +if [[ "${REBASE_IN_PROGRESS}" ]]; then + echo "Cherry-picking carried commits from previous rebase branch '${PREVIOUS_REBASE_BRANCH}'" + # The first merge in the previous rebase branch should be the point at which + # the target branch was merged with the upstream tag. Any commits since this + # merge should be cherry-picked. + MERGE_SHA="$(git log --pretty=%H --merges --max-count=1 "${PREVIOUS_REBASE_BRANCH}" )" + git cherry-pick "${MERGE_SHA}..${PREVIOUS_REBASE_BRANCH}" +fi diff --git a/openshift-hack/e2e/annotate/annotate.go b/openshift-hack/e2e/annotate/annotate.go new file mode 100644 index 0000000000000..096ae2a00aa96 --- /dev/null +++ b/openshift-hack/e2e/annotate/annotate.go @@ -0,0 +1,290 @@ +package annotate + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "regexp" + "sort" + "strings" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" +) + +var reHasSig = regexp.MustCompile(`\[sig-[\w-]+\]`) + +// Run generates test annotations for the targeted package. +// It accepts testMaps, which defines the labeling rules, and a filter +// function that removes elements based on test name and labels. +func Run(testMaps map[string][]string, filter func(name string) bool) { + var errors []string + + if len(os.Args) != 2 && len(os.Args) != 3 { + fmt.Fprintf(os.Stderr, "error: requires one or two arguments\n") + os.Exit(1) + } + filename := os.Args[len(os.Args)-1] + + generator := newGenerator(testMaps) + ginkgo.GetSuite().BuildTree() + ginkgo.GetSuite().WalkTests(generator.generateRename) + if len(generator.errors) > 0 { + errors = append(errors, generator.errors...)
+ } + + renamer := newRenamerFromGenerated(generator.output) + // generated file has a map[string]string in the following format: + // original k8s name: k8s name with our labels at the end + ginkgo.GetSuite().WalkTests(renamer.updateNodeText) + if len(renamer.missing) > 0 { + var names []string + for name := range renamer.missing { + names = append(names, name) + } + sort.Strings(names) + fmt.Fprintf(os.Stderr, "failed:\n%s\n", strings.Join(names, "\n")) + os.Exit(1) + } + + // All tests must be associated with a sig (either upstream), or downstream + // If you get this error, you should add the [sig-X] tag to your test (if its + // in origin) or if it is upstream add a new rule to rules.go that assigns + // the test in question to the right sig. + // + // Upstream sigs map to teams (if you have representation on that sig, you + // own those tests in origin) + // Downstream sigs: sig-imageregistry, sig-builds, sig-devex + for from, to := range generator.output { + if !reHasSig.MatchString(from) && !reHasSig.MatchString(to) { + errors = append(errors, fmt.Sprintf("all tests must define a [sig-XXXX] tag or have a rule %q", from)) + } + } + if len(errors) > 0 { + sort.Strings(errors) + for _, s := range errors { + fmt.Fprintf(os.Stderr, "failed: %s\n", s) + } + os.Exit(1) + } + + var pairs []string + for testName, labels := range generator.output { + if filter(fmt.Sprintf("%s%s", testName, labels)) { + continue + } + pairs = append(pairs, fmt.Sprintf("%q:\n%q,", testName, labels)) + } + sort.Strings(pairs) + contents := fmt.Sprintf(` +package generated + +import ( + "fmt" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" +) + +var Annotations = map[string]string{ +%s +} + +func init() { + ginkgo.GetSuite().SetAnnotateFn(func(name string, node types.TestSpec) { + if newLabels, ok := Annotations[name]; ok { + node.AppendText(newLabels) + } else { + panic(fmt.Sprintf("unable to find test %%s", name)) + } + }) +} +`, strings.Join(pairs, "\n\n")) + if err := ioutil.WriteFile(filename, []byte(contents), 0644); err != nil { + fmt.Fprintf(os.Stderr, "error: %v", err) + os.Exit(1) + } + if _, err := exec.Command("gofmt", "-s", "-w", filename).Output(); err != nil { + fmt.Fprintf(os.Stderr, "error: %v", err) + os.Exit(1) + } +} + +func newGenerator(testMaps map[string][]string) *ginkgoTestRenamer { + var allLabels []string + matches := make(map[string]*regexp.Regexp) + stringMatches := make(map[string][]string) + + for label, items := range testMaps { + sort.Strings(items) + allLabels = append(allLabels, label) + var remain []string + for _, item := range items { + re := regexp.MustCompile(item) + if p, ok := re.LiteralPrefix(); ok { + stringMatches[label] = append(stringMatches[label], p) + } else { + remain = append(remain, item) + } + } + if len(remain) > 0 { + matches[label] = regexp.MustCompile(strings.Join(remain, `|`)) + } + } + sort.Strings(allLabels) + + excludedTestsFilter := regexp.MustCompile(strings.Join(ExcludedTests, `|`)) + + return &ginkgoTestRenamer{ + allLabels: allLabels, + stringMatches: stringMatches, + matches: matches, + excludedTestsFilter: excludedTestsFilter, + output: make(map[string]string), + } +} + +func newRenamerFromGenerated(names map[string]string) *ginkgoTestRenamer { + return &ginkgoTestRenamer{ + output: names, + missing: make(map[string]struct{}), + } +} + +type ginkgoTestRenamer struct { + // keys defined in TestMaps in openshift-hack/e2e/annotate/rules.go + allLabels []string + // exact substrings to match to apply a particular label + 
stringMatches map[string][]string + // regular expressions to match to apply a particular label + matches map[string]*regexp.Regexp + // regular expression that permanently excludes a set of tests; + // see ExcludedTests in openshift-hack/e2e/annotate/rules.go + excludedTestsFilter *regexp.Regexp + + // output from generateRename and also the input for updateNodeText + output map[string]string + // map of unmatched test names + missing map[string]struct{} + // a list of errors to display + errors []string +} + +func (r *ginkgoTestRenamer) updateNodeText(name string, node types.TestSpec) { + if newLabels, ok := r.output[name]; ok { + node.AppendText(newLabels) + } else { + r.missing[name] = struct{}{} + } +} + +func (r *ginkgoTestRenamer) generateRename(name string, node types.TestSpec) { + newLabels := "" + newName := name + for { + count := 0 + for _, label := range r.allLabels { + // never apply a sig label twice + if strings.HasPrefix(label, "[sig-") && strings.Contains(newName, "[sig-") { + continue + } + if strings.Contains(newName, label) { + continue + } + + var hasLabel bool + for _, segment := range r.stringMatches[label] { + hasLabel = strings.Contains(newName, segment) + if hasLabel { + break + } + } + if !hasLabel { + if re := r.matches[label]; re != nil { + hasLabel = re.MatchString(newName) + } + } + + if hasLabel { + count++ + newLabels += " " + label + newName += " " + label + } + } + if count == 0 { + break + } + } + + // Append a suite label to the test, if it doesn't already have one + if !r.excludedTestsFilter.MatchString(newName) && !strings.Contains(newName, "[Suite:") { + isSerial := strings.Contains(newName, "[Serial]") + isConformance := strings.Contains(newName, "[Conformance]") + switch { + case isSerial && isConformance: + newLabels += " [Suite:openshift/conformance/serial/minimal]" + case isSerial: + newLabels += " [Suite:openshift/conformance/serial]" + case isConformance: + newLabels += " [Suite:openshift/conformance/parallel/minimal]" + default: + newLabels += " [Suite:openshift/conformance/parallel]" + } + } + codeLocations := node.CodeLocations() + if isGoModulePath(codeLocations[len(codeLocations)-1].FileName, "k8s.io/kubernetes", "test/e2e") { + newLabels += " [Suite:k8s]" + } + + if err := checkBalancedBrackets(newName); err != nil { + r.errors = append(r.errors, err.Error()) + } + r.output[name] = newLabels +} + +// isGoModulePath returns true if the packagePath reported by reflection is within the +// given module and module path. When go modules are in use, the module and modulePath +// are not contiguous as they were in older Go versions with vendoring, so naive +// substring checks fail. +// +// historically: ".../vendor/k8s.io/kubernetes/test/e2e" +// go.mod: "k8s.io/kubernetes@0.18.4/test/e2e" +func isGoModulePath(packagePath, module, modulePath string) bool { + return regexp.MustCompile(fmt.Sprintf(`\b%s(@[^/]*|)/%s\b`, regexp.QuoteMeta(module), regexp.QuoteMeta(modulePath))).MatchString(packagePath) +} + +// checkBalancedBrackets ensures that square brackets are balanced in generated test +// names. If they are not, it returns an error with the name of the test and a guess +// at where the unmatched bracket(s) are.
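+// +// For example, a hypothetical name like "[sig-foo] test [bar" would be reported +// with a caret marking the position of the unmatched "[".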
+func checkBalancedBrackets(testName string) error { + stack := make([]int, 0, len(testName)) + for idx, c := range testName { + if c == '[' { + stack = append(stack, idx) + } else if c == ']' { + // case when we start off with a ] + if len(stack) == 0 { + stack = append(stack, idx) + } else { + stack = stack[:len(stack)-1] + } + } + } + + if len(stack) > 0 { + msg := testName + "\n" + outerLoop: + for i := 0; i < len(testName); i++ { + for _, loc := range stack { + if i == loc { + msg += "^" + continue outerLoop + } + } + msg += " " + } + return fmt.Errorf("unbalanced brackets in test name:\n%s\n", msg) + } + + return nil +} diff --git a/openshift-hack/e2e/annotate/annotate_test.go b/openshift-hack/e2e/annotate/annotate_test.go new file mode 100644 index 0000000000000..614c902e29e44 --- /dev/null +++ b/openshift-hack/e2e/annotate/annotate_test.go @@ -0,0 +1,55 @@ +package annotate + +import ( + "fmt" + "os" + "testing" +) + +func Test_checkBalancedBrackets(t *testing.T) { + tests := []struct { + testCase string + testName string + wantErr bool + }{ + { + testCase: "balanced brackets succeeds", + testName: "[sig-storage] Test that storage [apigroup:storage.openshift.io] actually works [Driver:azure][Serial][Late]", + wantErr: false, + }, + { + testCase: "unbalanced brackets errors", + testName: "[sig-storage] Test that storage [apigroup:storage.openshift.io actually works [Driver:azure][Serial][Late]", + wantErr: true, + }, + { + testCase: "start with close bracket errors", + testName: "[sig-storage] test with a random bracket ]", + wantErr: true, + }, + { + testCase: "multiple unbalanced brackets errors", + testName: "[sig-storage Test that storage [apigroup:storage.openshift.io actually works [Driver:azure]", + wantErr: true, + }, + { + testCase: "balanced deeply nested brackets succeeds", + testName: "[[[[[[some weird test with deeply nested brackets]]]]]]", + wantErr: false, + }, + { + testCase: "unbalanced deeply nested brackets errors", + testName: "[[[[[[some weird test with deeply nested brackets]]]]]", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.testCase, func(t *testing.T) { + if err := checkBalancedBrackets(tt.testName); (err != nil) != tt.wantErr { + t.Errorf("checkBalancedBrackets() error = %v, wantErr %v", err, tt.wantErr) + } else if err != nil { + fmt.Fprintf(os.Stderr, "checkBalancedBrackets() success, found expected err = \n%s\n", err.Error()) + } + }) + } +} diff --git a/openshift-hack/e2e/annotate/cmd/main.go b/openshift-hack/e2e/annotate/cmd/main.go new file mode 100644 index 0000000000000..c1666ce9e045b --- /dev/null +++ b/openshift-hack/e2e/annotate/cmd/main.go @@ -0,0 +1,9 @@ +package main + +import ( + "k8s.io/kubernetes/openshift-hack/e2e/annotate" +) + +func main() { + annotate.Run(annotate.TestMaps, func(name string) bool { return false }) +} diff --git a/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go b/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go new file mode 100644 index 0000000000000..dc236fc7e082b --- /dev/null +++ b/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go @@ -0,0 +1,13255 @@ +package generated + +import ( + "fmt" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" +) + +var Annotations = map[string]string{ + "[sig-api-machinery] API Streaming (aka. WatchList) [Serial] doesn't support receiving resources as Tables": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-api-machinery] API Streaming (aka. 
WatchList) [Serial] falls backs to supported content type when when receiving resources as Tables was requested": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-api-machinery] API Streaming (aka. WatchList) [Serial] should be requested by client-go's List method when WatchListClient is enabled": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-api-machinery] API Streaming (aka. WatchList) [Serial] should be requested by dynamic client's List method when WatchListClient is enabled": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-api-machinery] API Streaming (aka. WatchList) [Serial] should be requested by informers when WatchListClient is enabled": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-api-machinery] API Streaming (aka. WatchList) [Serial] should be requested by metadata client's List method when WatchListClient is enabled": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-api-machinery] API priority and fairness should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (fairness)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (priority)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] API priority and fairness should support FlowSchema API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] API priority and fairness should support PriorityLevelConfiguration API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a mutating webhook should work [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a validating webhook should work [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to create and update mutating webhook configurations with match conditions [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to create and update validating webhook configurations with match conditions [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny attaching pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny custom resource creation, update and deletion [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook 
[Privileged:ClusterAdmin] should be able to deny pod and configmap creation [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should deny crd creation [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should honor timeout [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should include webhook resources in discovery documents [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate configmap [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with different stored version [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with pruning [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate everything except 'skip-me' configmaps [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate pod and apply defaults after mutation [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should reject mutating webhook configurations with invalid match conditions [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should reject validating webhook configurations with invalid match conditions [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should unconditionally reject operations on fail closed webhook [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AggregatedDiscovery should support aggregated discovery interface [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AggregatedDiscovery should support aggregated discovery interface for CRDs [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AggregatedDiscovery should support raw aggregated discovery endpoint Accept headers [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AggregatedDiscovery should support raw aggregated discovery request for CRDs [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Aggregator Should be able to support the 1.17 Sample API Server using the current 
Aggregator [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CBOR [Feature:CBOR] clients remain compatible with the 1.17 sample-apiserver [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST NOT fail to update a resource due to CRD Validation Rule errors on unchanged correlatable fields": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST NOT fail to update a resource due to JSONSchema errors on unchanged correlatable fields": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST NOT ratchet errors raised by transition rules": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST evaluate a CRD Validation Rule with oldSelf = nil for new values when optionalOldSelf is true": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to CRD Validation Rule errors on changed fields": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to CRD Validation Rule errors on unchanged uncorrelatable fields": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to JSONSchema errors on changed fields": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to JSONSchema errors on unchanged uncorrelatable fields": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert a non homogeneous list of CRs [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert from CR v1 to CR v2 [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch watch on custom resource definition objects [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition creating/deleting custom resource definition objects works [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition getting/updating/patching custom resource definition status sub-resource works [Conformance]": " [Suite:openshift/conformance/parallel/minimal] 
[Suite:k8s]", + + "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition listing custom resource definition objects works [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] custom resource defaulting for requests and from storage works [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] should include custom resource definition resources in discovery documents [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceFieldSelectors [Privileged:ClusterAdmin] CustomResourceFieldSelectors MUST list and watch custom resources matching the field selector [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] [Flaky] kubectl explain works for CR with the same resource name as built-in object.": " [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] removes definition from spec when one version gets changed to not be served [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] updates the published spec when one version gets renamed [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields at the schema root [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields in an embedded object [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD with validation schema [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD without validation schema [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of different groups [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group and version but different kinds [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group but different versions [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST NOT fail validation for create of a custom resource that satisfies the x-kubernetes-validations rules": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains a x-kubernetes-validations rule that refers to a property that 
do not exist": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that contains a syntax error": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that exceeds the estimated cost limit": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource that exceeds the runtime cost limit for x-kubernetes-validations rule execution": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail update of a custom resource that does not satisfy a x-kubernetes-validations transition rule": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail validation for create of a custom resource that does not satisfy the x-kubernetes-validations rules": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Discovery Custom resource should have storage version hash": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Discovery should accurately determine present and missing resources": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Discovery should locate the groupVersion and a resource within each APIGroup [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Discovery should validate PreferredVersion for each APIGroup [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Etcd failure [Disruptive] should recover from SIGKILL": " [Serial] [Suite:k8s]", + + "[sig-api-machinery] Etcd failure [Disruptive] should recover from network partition with master": " [Serial] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should create/apply a CR with unknown fields for CRD with no validation schema [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should create/apply a valid CR for CRD with validation schema [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should create/apply an invalid CR with extra properties for CRD with validation schema [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should detect duplicates in a CR when preserving unknown fields [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should detect unknown and duplicate fields of a typed object [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should detect unknown metadata fields in both the root and embedded object of a CR [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should detect unknown metadata fields of a typed object [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + 
+ "[sig-api-machinery] Garbage collector should delete RS created by deployment when not orphaning [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should delete jobs and pods created by cronjob": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should delete pods created by rc when not orphaning [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should not be blocked by dependency circle [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should orphan pods created by rc if delete options say so [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should support cascading deletion of custom resources": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should support orphan deletion of custom resources": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Generated clientset should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Generated clientset should create v1 cronJobs, delete cronJobs, watch cronJobs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should apply a finalizer to a Namespace [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should apply an update to a Namespace [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should apply changes to a namespace status [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should delete fast enough (90 percent of 100 namespaces in 150 seconds)": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should ensure that all pods are removed when a namespace is deleted [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should ensure that all services are removed when a namespace is deleted [Conformance]": " 
[Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should patch a Namespace [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-api-machinery] OpenAPIV3 should contain OpenAPI V3 for Aggregated APIServer [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-api-machinery] OpenAPIV3 should publish OpenAPI V3 for CustomResourceDefinition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] OpenAPIV3 should round trip OpenAPI V3 for all built-in group versions": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should apply changes to a resourcequota status [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should be able to update and delete ResourceQuota. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a ResourceClaim [Feature:DynamicResourceAllocation]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a configMap. 
[Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a custom resource.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim with a storage class": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a pod. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replica set. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replication controller. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a secret. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a service. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should manage the lifecycle of a ResourceQuota [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope using scope-selectors.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes through scope selectors.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes. 
[Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Server request timeout default timeout should be used if the specified timeout in the request URL is 0s": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Server request timeout should return HTTP status code 400 if the user specifies an invalid timeout in the request URL": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Server request timeout the request should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should create an applied object if it does not already exist": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should give up ownership of a field if forced applied by a controller": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should ignore conflict errors if force apply is used": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should not remove a field if an owner unsets the field but other managers still have ownership of the field": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should remove a field if it is owned but removed in the apply request": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should work for CRDs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should work for subresources": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Servers with support for API chunking should return chunks of results for list calls [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Servers with support for API chunking should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow] [Conformance]": " [Suite:k8s]", + + "[sig-api-machinery] Servers with support for Table transformation should return a 406 for a backend which does not implement metadata [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Servers with support for Table transformation should return chunks of table results for list calls": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Servers with support for Table transformation should return generic metadata details across all namespaces for nodes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Servers with support for Table transformation should return pod details": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] StorageVersion resources [Feature:StorageVersionAPI] storage version with non-existing id should be GC'ed": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should allow expressions to refer variables. 
[Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should support ValidatingAdmissionPolicy API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should support ValidatingAdmissionPolicyBinding API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should type check a CRD": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should type check validation expressions": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should validate against a Deployment [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Watchers should be able to restart watching from the last resource version observed by the previous watch [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Watchers should be able to start watching from a specific resource version [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Watchers should observe add, update, and delete watch notifications on configmaps [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Watchers should observe an object deletion if it stops meeting the requirements of the selector [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Watchers should receive events on concurrent watches in same order [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/json,application/vnd.kubernetes.protobuf\"": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/json\"": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/vnd.kubernetes.protobuf,application/json\"": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/vnd.kubernetes.protobuf\"": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] health handlers should contain necessary checks": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] kube-apiserver identity [Feature:APIServerIdentity] kube-apiserver identity should persist after restart [Disruptive]": " [Serial] [Suite:k8s]", + + "[sig-api-machinery] server version should find the server version [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ControllerRevision [Serial] should manage the lifecycle of a ControllerRevision [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] CronJob should be able to schedule after more than 100 missed schedule": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] CronJob should delete failed 
finished jobs with limit of one job": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] CronJob should delete successful finished jobs with limit of one successful job": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] CronJob should not emit unexpected warnings": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] CronJob should not schedule jobs when suspended [Slow] [Conformance]": " [Suite:k8s]", + + "[sig-apps] CronJob should not schedule new jobs when ForbidConcurrent [Slow] [Conformance]": " [Suite:k8s]", + + "[sig-apps] CronJob should remove from active list jobs that have been deleted": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] CronJob should replace jobs when ReplaceConcurrent [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] CronJob should schedule multiple jobs concurrently [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] CronJob should set the cronjob-scheduled-timestamp annotation on a job": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] CronJob should support CronJob API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] CronJob should support timezone": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should list and delete a collection of DaemonSets [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should not update pod when spec was updated and update strategy is OnDelete": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should retry creating failed daemon pods [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance]": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should run and stop complex daemon [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should run and stop complex daemon with node affinity": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should run and stop simple daemon [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should surge pods onto nodes when spec was updated and update strategy is RollingUpdate": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should verify changes to a daemon set status [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] DaemonRestart [Disruptive] Controller Manager should not create/delete replicas across restart": " [Serial] [Suite:k8s]", + + "[sig-apps] DaemonRestart [Disruptive] Kube-proxy should recover after being killed accidentally": " [Serial] [Suite:k8s]", + + "[sig-apps] DaemonRestart [Disruptive] Kubelet should not restart containers across restart": " [Serial] [Suite:k8s]", + + "[sig-apps] DaemonRestart [Disruptive] Scheduler should continue assigning pods to nodes across restart": " [Serial] 
[Suite:k8s]", + + "[sig-apps] Deployment Deployment should have a working scale subresource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment RecreateDeployment should delete old pods and create new ones [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment RollingUpdateDeployment should delete old pods and create new ones [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment deployment reaping should cascade to its replica sets and pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Deployment deployment should delete old replica sets [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment deployment should support proportional scaling [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment deployment should support rollover [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment iterative rollouts should eventually progress": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Deployment should not disrupt a cloud load-balancer's connectivity during rollout": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Deployment should run the lifecycle of a Deployment [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment should validate Deployment Status endpoints [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment test Deployment ReplicaSet orphaning and adoption regarding controllerRef": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController Listing PodDisruptionBudgets for all namespaces should list and delete a collection of PodDisruptionBudgets [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: enough pods, absolute => should allow an eviction": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: enough pods, replicaSet, percentage => should allow an eviction": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: maxUnavailable allow single eviction, percentage => should allow an eviction": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: maxUnavailable deny evictions, integer => should not allow an eviction [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: no PDB => should allow an eviction": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: too few pods, absolute => should not allow an eviction": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: too few pods, replicaSet, percentage => should not allow an eviction [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-apps] DisruptionController should block an eviction until the PDB is updated to allow it [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] DisruptionController should create a PodDisruptionBudget [Conformance]": " [Suite:openshift/conformance/parallel/minimal] 
[Suite:k8s]", + + "[sig-apps] DisruptionController should evict ready pods with AlwaysAllow UnhealthyPodEvictionPolicy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController should evict ready pods with Default UnhealthyPodEvictionPolicy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController should evict ready pods with IfHealthyBudget UnhealthyPodEvictionPolicy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController should evict unready pods with AlwaysAllow UnhealthyPodEvictionPolicy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController should not evict unready pods with Default UnhealthyPodEvictionPolicy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController should not evict unready pods with IfHealthyBudget UnhealthyPodEvictionPolicy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController should observe PodDisruptionBudget status updated [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] DisruptionController should observe that the PodDisruptionBudget status is not updated for unmanaged pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController should update/patch PodDisruptionBudget status [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should adopt matching orphans and release non-matching pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should allow to delegate reconciliation to external controller": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should allow to use a pod failure policy to ignore failure matching on DisruptionTarget condition [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should allow to use a pod failure policy to ignore failure matching on exit code": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should allow to use the pod failure policy on exit code to fail the job early [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should apply changes to a job status [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should create pods for an Indexed job with completion indexes and specified hostname [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should create pods with completion indexes for an Indexed Job [Feature:PodIndexLabel]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should delete a job [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should delete pods when suspended": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should execute all indexes despite some failing when using backoffLimitPerIndex": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should fail to exceed backoffLimit": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should fail when exceeds active deadline": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should manage the lifecycle of a job [Conformance]": " 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] Job should mark indexes as failed when the FailIndex action is matched in podFailurePolicy": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] Job should not create pods when created in suspend state": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] Job should recreate pods only after they have failed if pod replacement policy is set to Failed": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] Job should remove pods when job is deleted": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] Job should run a job to completion when tasks sometimes fail and are locally restarted [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] Job should run a job to completion when tasks sometimes fail and are not locally restarted": " [Flaky] [Suite:k8s]",
+
+ "[sig-apps] Job should run a job to completion when tasks succeed": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] Job should run a job to completion with CPU requests [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+ "[sig-apps] Job should terminate job execution when the number of failed indexes exceeds maxFailedIndexes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] Job should update the status ready field": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] Job with successPolicy should succeeded when all indexes succeeded": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] Job with successPolicy succeededCount rule should succeeded even when some indexes remain pending": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] Job with successPolicy succeededIndexes rule should succeeded even when some indexes remain pending": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] ReplicaSet Replace and Patch tests [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] ReplicaSet Replicaset should have a working scale subresource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] ReplicaSet should adopt matching pods on creation and release no longer matching pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] ReplicaSet should list and delete a collection of ReplicaSets [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] ReplicaSet should serve a basic image on each replica with a private image": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] ReplicaSet should serve a basic image on each replica with a public image [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] ReplicaSet should surface a failure condition on a common issue like exceeded quota": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] ReplicaSet should validate Replicaset Status endpoints [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] ReplicationController should adopt matching pods on creation [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] ReplicationController should get and update a ReplicationController scale [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] ReplicationController should release no longer matching pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] ReplicationController should serve a basic image on each replica with a private image": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] ReplicationController should serve a basic image on each replica with a public image [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] ReplicationController should test the lifecycle of a ReplicationController [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Automatically recreate PVC for pending pod when PVC is missing PVC should be recreated when pod is pending due to missing PVC [Disruptive] [Serial]": " [Suite:k8s]",
+
+ "[sig-apps] StatefulSet AvailableReplicas should get updated accordingly when MinReadySeconds is enabled": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance]": " [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]": " [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Should recreate evicted statefulset [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should adopt matching orphans and release non-matching pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should have a working scale subresource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should implement legacy replacement when the update strategy is OnDelete": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should list, patch and delete a collection of StatefulSets [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should not deadlock when a pod's predecessor fails": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 with failing container": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 without failing container": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications with PVCs": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should provide basic identity": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should validate Statefulset Status endpoints [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working CockroachDB cluster": " [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working mysql cluster": " [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working redis cluster": " [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working zookeeper cluster": " [Suite:k8s]",
+
+ "[sig-apps] StatefulSet MinReadySeconds should be honored when enabled": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs after adopting pod (WhenDeleted)": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs after adopting pod (WhenScaled)": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs with a OnScaledown policy": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs with a WhenDeleted policy": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should not delete PVC with OnScaledown policy if another controller owns the PVC": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should not delete PVCs when there is another controller": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Decreasing .start.ordinal": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Increasing .start.ordinal": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Removing .start.ordinal": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Setting .start.ordinal": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] TTLAfterFinished job should be deleted once it finishes after TTL seconds": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-apps] stateful Upgrade [Feature:StatefulUpgrade] stateful upgrade should maintain a functioning cluster": " [Disabled:Unimplemented] [Suite:k8s]",
+
+ "[sig-architecture] Conformance Tests should have at least two untainted nodes [Conformance]": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support CSR API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support building a client with a CSR": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] SelfSubjectReview should support SelfSubjectReview API operations": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] SelfSubjectReview testing SSR in different API groups authentication/v1": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] SelfSubjectReview testing SSR in different API groups authentication/v1beta1": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] ServiceAccount admission controller migration [Feature:BoundServiceAccountTokenVolume] master upgrade should maintain a functioning cluster": " [Disabled:Unimplemented] [Suite:k8s]",
+
+ "[sig-auth] ServiceAccounts ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-auth] ServiceAccounts no secret-based service account token should be auto-generated": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] ServiceAccounts should allow opting out of API token automount [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-auth] ServiceAccounts should create a serviceAccountToken and ensure a successful TokenReview [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-auth] ServiceAccounts should guarantee kube-root-ca.crt exist in any namespace [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-auth] ServiceAccounts should mount an API token into pods [Conformance]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-auth] ServiceAccounts should mount projected service account token [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-auth] ServiceAccounts should run through the lifecycle of a ServiceAccount [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-auth] ServiceAccounts should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow]": " [Suite:k8s]",
+
+ "[sig-auth] ServiceAccounts should update a ServiceAccount [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-auth] SubjectReview should support SubjectReview API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-auth] ValidatingAdmissionPolicy can restrict access by-node": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] [Serial] should be able to mount a big number (>100) of CTBs": " [Disabled:Alpha] [Suite:k8s]",
+
+ "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] [Serial] should be able to mount a single ClusterTrustBundle by name": " [Disabled:Alpha] [Suite:k8s]",
+
+ "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] [Serial] should be able to specify multiple CTB volumes": " [Disabled:Alpha] [Suite:k8s]",
+
+ "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] [Serial] should be capable to mount multiple trust bundles by signer+labels can combine all signer CTBs with an empty label selector": " [Disabled:Alpha] [Suite:k8s]",
+
+ "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] [Serial] should be capable to mount multiple trust bundles by signer+labels can combine multiple CTBs with signer name and label selector": " [Disabled:Alpha] [Suite:k8s]",
+
+ "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] [Serial] should be capable to mount multiple trust bundles by signer+labels should start if only signer name and explicit label selector matches nothing + optional=true": " [Disabled:Alpha] [Suite:k8s]",
+
+ "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] [Serial] should be capable to mount multiple trust bundles by signer+labels should start if only signer name and nil label selector + optional=true": " [Disabled:Alpha] [Suite:k8s]",
+
+ "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] [Serial] should prevent a pod from starting if: sets optional=false and no trust bundle matches query": " [Disabled:Alpha] [Suite:k8s]",
+
+ "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] [Serial] should prevent a pod from starting if: sets optional=false and the configured CTB does not exist": " [Disabled:Alpha] [Suite:k8s]",
+
+ "[sig-auth] [Feature:NodeAuthenticator] The kubelet can delegate ServiceAccount tokens to the API server": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] [Feature:NodeAuthenticator] The kubelet's main port 10250 should reject requests with no credentials": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to create another node": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to delete another node": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent secret should exit with the Forbidden error, not a NotFound error": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] [Feature:NodeAuthorizer] Getting a secret for a workload the node has access to should succeed": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] [Feature:NodeAuthorizer] Getting an existing configmap should exit with the Forbidden error": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-auth] [Feature:NodeAuthorizer] Getting an existing secret should exit with the Forbidden error": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) CustomResourceDefinition Should scale with a CRD targetRef": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light [Slow] Should scale from 2 pods to 1 pod": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod using Average Utilization for aggregation": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should not scale up on a busy sidecar with an idle application": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods on a busy application with an idle sidecar container": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods and verify decision stability": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod and verify decision stability": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale down": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale up": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range over two stabilization windows": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range with stabilization window and pod limit rate": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with long upscale stabilization window should scale up only after the stabilization period": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale down no more than given number of Pods per minute": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale up no more than given number of Pods per minute": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale down no more than given percentage of current Pods per minute": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale up no more than given percentage of current Pods per minute": " [Suite:k8s]",
+
+ "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with short downscale stabilization window should scale down soon after the stabilization period": " [Suite:k8s]",
+
+ "[sig-cli] Kubectl Port forwarding Shutdown client connection while the remote stream is writing data to the port-forward connection port-forward should keep working after detect broken connection": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects NO client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends NO DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects NO client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends NO DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl Port forwarding with a pod being removed should stop port-forwarding": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Guestbook application should create and stop a working application [Conformance]": " [Slow] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl apply apply set/view last-applied": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl apply should apply a new configuration to an existing RC": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl apply should reuse port when apply to an existing SVC": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl cluster-info dump should check if cluster-info dump succeeds": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl create quota should create a quota with scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl create quota should create a quota without scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl create quota should reject quota with invalid scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for cronjob": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl events should show event when pod is created": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl get componentstatuses should get componentstatuses": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl prune with applyset should apply and prune objects": " [Disabled:Alpha] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl taint [Serial] should remove all the taints with the same key off a node": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl taint [Serial] should update the taint on a node": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl validation should create/apply a CR with unknown fields for CRD with no validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl validation should create/apply a valid CR for CRD with validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl validation should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields in both the root and embedded object of a CR": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields of a typed object": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Proxy server should support proxy with --port 0 [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command with --leave-stdin-open": " [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command without --restart=Never": " [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command without --restart=Never, but with --rm": " [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod Kubectl run running a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod Kubectl run running a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod should contain last line of the log": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod should return command exit codes should handle in-cluster config": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod should return command exit codes should support port-forward": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod should support exec through an HTTP proxy": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod should support exec through kubectl proxy": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod should support exec using resource/name": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod should support exec": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod should support inline execution and attach with websockets or fallback to spdy": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Simple pod should support inline execution and attach": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Update Demo should create and stop a replication controller [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client kubectl subresource flag GET on status subresource of built-in type (node) returns identical info as GET on the built-in type": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client kubectl subresource flag should not be used in a bulk GET": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl client kubectl wait should ignore not found error with --for=delete": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-cli] Kubectl delete interactive based on user confirmation input": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl exec should be able to execute 1000 times in a container": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from all pods based on default container": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from each pod and each container in Deployment": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl logs default container logs the second container is the default-container by annotation should log default container if not specified": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] Kubectl logs logs should be able to retrieve and filter logs [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-cli] Kubectl rollout undo undo should rollback and update deployment env": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] kubectl debug custom profile should be applied on static profiles on ephemeral container": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cli] kubectl debug custom profile should be applied on static profiles while copying from pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Downgrade [Feature:Downgrade] cluster downgrade should maintain a functioning cluster [Feature:ClusterDowngrade]": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] GKE node pools [Feature:GKENodePool] should create a cluster with multiple node pools [Feature:GKENodePool]": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas different zones [Serial] [Disruptive]": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas multizone workers [Serial] [Disruptive]": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas same zone [Serial] [Disruptive]": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Nodes [Disruptive] Resize [Slow] should be able to add nodes": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Nodes [Disruptive] Resize [Slow] should be able to delete nodes": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to cadvisor port 4194 using proxy subresource": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to the readonly kubelet port 10255 using proxy subresource": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not have port 10255 open on its all public IP addresses": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not have port 4194 open on its all public IP addresses": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by dropping all inbound packets for a while and ensure they function afterwards": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by dropping all outbound packets for a while and ensure they function afterwards": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by ordering clean reboot and ensure they function upon restart": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by ordering unclean reboot and ensure they function upon restart": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by switching off the network interface and ensure they function upon switch on": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by triggering kernel panic and ensure they function upon restart": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Restart [Disruptive] [KubeUp] should restart all nodes and ensure all nodes and pods recover": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Upgrade [Feature:Upgrade] cluster upgrade should maintain a functioning cluster [Feature:ClusterUpgrade]": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] Upgrade [Feature:Upgrade] master upgrade should maintain a functioning cluster [Feature:MasterUpgrade]": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-cloud-provider-gcp] [Disruptive] NodeLease NodeLease deletion node lease should be deleted when corresponding node is deleted": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]",
+
+ "[sig-cloud-provider] [Feature:CloudProvider] [Disruptive] Nodes should be deleted on API server if it doesn't exist in the cloud provider": " [Serial] [Suite:k8s]",
+
+ "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should delete the signed bootstrap tokens from clusterInfo ConfigMap when bootstrap token is deleted": " [Disabled:Unimplemented] [Suite:k8s]",
+
+ "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should delete the token secret when the secret expired": " [Disabled:Unimplemented] [Suite:k8s]",
+
+ "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should not delete the token secret when the secret is not expired": " [Disabled:Unimplemented] [Suite:k8s]",
+
+ "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial] [Disruptive]": " [Disabled:Unimplemented] [Suite:k8s]",
+
+ "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should sign the new added bootstrap tokens": " [Disabled:Unimplemented] [Suite:k8s]",
+
+ "[sig-instrumentation] Events API should delete a collection of events [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-instrumentation] Events API should ensure that an event can be fetched, patched, deleted, and listed [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-instrumentation] Events should delete a collection of events [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-instrumentation] Events should manage the lifecycle of an event [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-instrumentation] Logging soak [Performance] [Slow] [Disruptive] should survive logging 1KB every 1s seconds, for a duration of 2m0s": " [Serial] [Suite:k8s]",
+
+ "[sig-instrumentation] Metrics should grab all metrics from kubelet /metrics/resource endpoint": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-instrumentation] MetricsGrabber should grab all metrics from API server.": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-instrumentation] MetricsGrabber should grab all metrics from a ControllerManager.": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-instrumentation] MetricsGrabber should grab all metrics from a Kubelet.": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-instrumentation] MetricsGrabber should grab all metrics from a Scheduler.": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-instrumentation] MetricsGrabber should grab all metrics slis from API server.": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] CVE-2021-29923 IPv4 Service Type ClusterIP with leading zeros should work interpreted as decimal": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Connectivity Pod Lifecycle should be able to connect from a Pod to a terminating Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Connectivity Pod Lifecycle should be able to connect to other Pod from a terminating Pod": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-network] Conntrack proxy implementation should not be vulnerable to the invalid conntrack state bug [Privileged]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Conntrack should be able to preserve UDP traffic when initial unready endpoints get ready": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service and client is hostNetwork": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service with InternalTrafficPolicy set to Local": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a NodePort service": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] DNS HostNetwork should resolve DNS of partial qualified names for services on hostNetwork pods with dnsPolicy: ClusterFirstWithHostNet [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-network] DNS HostNetwork spec.Hostname field is not silently ignored and is used for hostname for a Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] DNS HostNetwork spec.Hostname field is silently ignored and the node hostname is used when hostNetwork is set to true for a Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] DNS [Feature:RelaxedDNSSearchValidation] [Feature:Alpha] should work with a search path containing an underscore and a search path with a single dot": " [Disabled:Alpha] [Suite:k8s]",
+
+ "[sig-network] DNS configMap nameserver Change stubDomain should be able to change stubDomain configuration [Slow] [Serial]": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-network] DNS configMap nameserver Forward PTR lookup should forward PTR records lookup to upstream nameserver [Slow] [Serial]": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-network] DNS configMap nameserver Forward external name lookup should forward externalname lookup to upstream nameserver [Slow] [Serial]": " [Disabled:SpecialConfig] [Suite:k8s]",
+
+ "[sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] DNS should provide DNS for ExternalName services [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] DNS should provide DNS for pods for Hostname [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] DNS should provide DNS for pods for Subdomain [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] DNS should provide DNS for services [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] DNS should provide DNS for the cluster [Conformance]": " [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] DNS should provide DNS for the cluster [Provider:GCE]": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] DNS should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance]": " [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] DNS should resolve DNS of partial qualified names for the cluster [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] DNS should support configurable pod DNS nameservers [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] DNS should support configurable pod resolv.conf": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] DNS should work with the pod containing more than 6 DNS search paths and longer than 256 search list characters": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] EndpointSlice should create Endpoints and EndpointSlices for Pods matching a Service [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] EndpointSlice should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] EndpointSlice should have Endpoints and EndpointSlices pointing to API Server [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] EndpointSlice should support a Service with multiple endpoint IPs specified in multiple EndpointSlices": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] EndpointSlice should support a Service with multiple ports specified in multiple EndpointSlices": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] EndpointSlice should support creating EndpointSlice API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] EndpointSliceMirroring should mirror a custom Endpoint with multiple subsets and same IP address": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] EndpointSliceMirroring should mirror a custom Endpoints resource through create update and delete [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] Ingress API should support creating Ingress API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] IngressClass API should support creating IngressClass API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] IngressClass [Feature:Ingress] should allow IngressClass to have Namespace-scoped parameters [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+ "[sig-network] IngressClass [Feature:Ingress] should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+ "[sig-network] IngressClass [Feature:Ingress] should not set default value if no default IngressClass [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+ "[sig-network] IngressClass [Feature:Ingress] should set default value on new IngressClass [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+ "[sig-network] KubeProxy should set TCP CLOSE_WAIT timeout [Privileged]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] KubeProxy should update metric for tracking accepted packets destined for localhost nodeports": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should only target nodes with endpoints": " [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should target all nodes with endpoints": " [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should work for type=LoadBalancer": " [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should work from pods": " [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to change the type and ports of a TCP service [Slow]": " [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to change the type and ports of a UDP service [Slow]": " [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to create LoadBalancer Service without NodePort and change it [Slow]": " [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on different nodes": " [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on the same nodes": " [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to switch session affinity for LoadBalancer service with Cluster traffic policy [Slow] [LinuxOnly]": " [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to switch session affinity for LoadBalancer service with Local traffic policy [Slow] [LinuxOnly]": " [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should handle load balancer cleanup finalizer for service [Slow]": " [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should have session affinity work for LoadBalancer service with Cluster traffic policy [Slow] [LinuxOnly]": " [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should have session affinity work for LoadBalancer service with Local traffic policy [Slow] [LinuxOnly]": " [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should not have connectivity disruption during rolling update with externalTrafficPolicy=Cluster [Slow]": " [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should not have connectivity disruption during rolling update with externalTrafficPolicy=Local [Slow]": " [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] LoadBalancers [Feature:LoadBalancer] should only allow access from service loadbalancer source ranges [Slow]": " [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]",
+
+ "[sig-network] Netpol API should support creating NetworkPolicy API operations": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol API should support creating NetworkPolicy API with endport field": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy]": " [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy]": " [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated namespace [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated pod [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy]": " [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should deny egress from all pods in a namespace [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should deny egress from pods based on PodSelector [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should deny ingress access to updated pod [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should deny ingress from pods on other namespaces [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions using default ns label [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Ports [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on any PodSelectors [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic for a target [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic from pods in all namespaces [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic based on NamespaceSelector with MatchLabels using default ns label [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should enforce updated policy [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should stop enforcing policies after they are deleted [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should support allow-all policy [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol NetworkPolicy between server and client should work with Ingress, Egress specified together [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy based on Ports [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy based on Ports [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: sctp [LinuxOnly] [Feature:SCTPConnectivity]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: udp [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: sctp [LinuxOnly] [Feature:SCTPConnectivity]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should be able to handle large requests: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should be able to handle large requests: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for client IP based session affinity: http [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for client IP based session affinity: udp [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: sctp [Feature:SCTPConnectivity]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for multiple endpoint-Services with same selector": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for node-Service: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for node-Service: sctp [Feature:SCTPConnectivity]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for node-Service: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for pod-Service: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for pod-Service: sctp [Feature:SCTPConnectivity]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for pod-Service: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should function for service endpoints using hostNetwork": " [Disabled:Broken] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should support basic nodePort: udp functionality": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should update endpoints: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-network] Networking Granular Checks: Services should update endpoints: udp": "
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should update nodePort: http [Slow]": " [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should update nodePort: udp [Slow]": " [Suite:k8s]", + + "[sig-network] Networking IPerf2 [Feature:Networking-Performance] should run iperf2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking should allow creating a Pod with an SCTP HostPort [LinuxOnly] [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-network] Networking should check kube-proxy urls": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv4]": " [Skipped:Disconnected] [Skipped:Proxy] [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv6] [Experimental][LinuxOnly]": " [Disabled:Broken] [Skipped:Disconnected] [Skipped:Proxy] [Skipped:azure] [Suite:k8s]", + + "[sig-network] Networking should provide unchanging, static URL paths for kubernetes api services": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Networking should provider Internet connection for containers using DNS [Feature:Networking-DNS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Networking should recreate its iptables rules if they are deleted [Disruptive]": " [Serial] [Suite:k8s]", + + "[sig-network] NoSNAT Should be able to send traffic between Pods without SNAT": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Proxy version v1 A set of valid responses are returned for both pod and service Proxy [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Proxy version v1 A set of valid responses are returned for both pod and service ProxyWithPath [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Proxy version v1 should proxy logs on node using proxy subresource ": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Proxy version v1 should proxy logs on node with explicit kubelet port using proxy subresource ": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Proxy version v1 should proxy through a service and a pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Service endpoints latency should not be very high [Conformance]": " [Serial] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-network] Services should allow creating a basic SCTP service with pod and endpoints [LinuxOnly] [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-network] Services should allow pods to hairpin back to themselves through services": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should be able to change the type from ClusterIP to ExternalName [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to change the type from ExternalName to ClusterIP [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to change the type from ExternalName to NodePort [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to change 
the type from NodePort to ExternalName [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is true": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should be able to create a functioning NodePort service [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to up and down services": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Services should be able to update service type to NodePort listening on same port number but different protocols": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should be possible to connect to a service via ExternalIP when the external IP is not assigned to a node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should be rejected for evicted pods (no endpoints exist)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should be rejected when no endpoints exist": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should be updated after adding or deleting ports ": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should check NodePort out-of-range": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should complete a service status lifecycle [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should create endpoints for unready pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should delete a collection of services [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should fail health check node port if there are only terminating endpoints": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should fallback to local terminating endpoints when there are no ready endpoints with externalTrafficPolicy=Local": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should fallback to local terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Local": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should fallback to terminating endpoints when there are no ready endpoints with externallTrafficPolicy=Cluster": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should fallback to terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Cluster": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should find a service from listing all namespaces [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should have session affinity timeout work for NodePort service [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
+ "[sig-network] Services should have session affinity timeout work for service with type clusterIP [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should have session affinity work for NodePort service [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should implement NodePort and HealthCheckNodePort correctly when ExternalTrafficPolicy changes": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-network] Services should implement service.kubernetes.io/headless": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Services should implement service.kubernetes.io/service-proxy-name": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Services should not be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is false": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should preserve source pod IP for traffic thru service cluster IP [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should prevent NodePort collisions": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should provide secure master service [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should release NodePorts on delete": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should respect internalTrafficPolicy=Local Pod (hostNetwork: true) to Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should respect internalTrafficPolicy=Local Pod and Node, to Pod (hostNetwork: true)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should respect internalTrafficPolicy=Local Pod to Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should serve a basic endpoint from pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should serve endpoints on same port and different protocol for internal traffic on Type LoadBalancer ": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should serve endpoints on same port and different protocols [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should serve multiport endpoints from pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should support externalTrafficPolicy=Local for type=NodePort": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should test the lifecycle of an Endpoint [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should work after restarting apiserver [Disruptive]": " [Serial] [Suite:k8s]", + + "[sig-network] Services should work after restarting kube-proxy [Disruptive]": " [Serial] [Suite:k8s]", + + "[sig-network] Services should work after the service has been recreated": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should be able to handle 
large requests: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should be able to handle large requests: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for client IP based session affinity: http [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for client IP based session affinity: udp [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for endpoint-Service: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for endpoint-Service: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for node-Service: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for node-Service: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: sctp [Feature:SCTPConnectivity]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for service endpoints using hostNetwork": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should update endpoints: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should update endpoints: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should be able to reach pod on ipv4 and ipv6 ip": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create a single stack service with cluster ip from primary service range": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create pod, add ipv6 and ipv4 ip to host ips": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create pod, add ipv6 and ipv4 ip to pod ips": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create service with ipv4 cluster ip": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] 
should create service with ipv4,v6 cluster ip": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create service with ipv6 cluster ip": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create service with ipv6,v4 cluster ip": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should have ipv4 and ipv6 internal node ip": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:PerformanceDNS] [Serial] Should answer DNS query for maximum number of services per cluster": " [Slow] [Suite:k8s]", + + "[sig-network] [Feature:ServiceCIDRs] [FeatureGate:MultiCIDRServiceAllocator] [Beta] should create Services and serve on different Service CIDRs": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-network] [Feature:Topology Hints] should distribute endpoints evenly": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-network] [Feature:Traffic Distribution] when Service has trafficDistribution=PreferClose should route traffic to an endpoint that is close to the client": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] kube-proxy migration [Serial] [Disruptive] [Feature:KubeProxyDaemonSetMigration] Downgrade kube-proxy from a DaemonSet to static pods should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-network] kube-proxy migration [Serial] [Disruptive] [Feature:KubeProxyDaemonSetMigration] Upgrade kube-proxy from static pods to a DaemonSet should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-node] AppArmor load AppArmor profiles can disable an AppArmor profile, using unconfined": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile specified in annotations": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile specified on the container": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile specified on the pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] ConfigMap should be consumable as environment variable names when configmap keys start with a digit [Feature:RelaxedEnvironmentVariableValidation]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] ConfigMap should be consumable via environment variable [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] ConfigMap should be consumable via the environment [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] ConfigMap should fail to create ConfigMap with empty key [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] ConfigMap should run through a ConfigMap lifecycle [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] ConfigMap should update ConfigMap successfully": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] 
[Suite:k8s]", + + "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test on terminated container should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test on terminated container should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test on terminated container should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test on terminated container should report termination message if TerminationMessagePath is set [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test on terminated container should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test when running a container with a new image should be able to pull from private registry with secret [NodeConformance]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test when running a container with a new image should be able to pull image [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test when running a container with a new image should not be able to pull from private registry without secret [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test when running a container with a new image should not be able to pull image from invalid registry [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test when starting a container that exits should run with the expected status [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] 
Containers should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Containers should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Containers should be able to override the image's default command and arguments [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Containers should use the image defaults if command and args are blank [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] ResourceSlice Controller creates slices": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster DaemonSet with admin access [Feature:DRAAdminAccess]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster must apply per-node permission checks": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster must manage ResourceSlices [Slow]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster support validating admission policy for admin access [Feature:DRAAdminAccess]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster supports count/resourceclaims.resource.k8s.io ResourceQuota": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster truncates the name of a generated resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must call NodePrepareResources even if not used by any container": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must map configs and devices to the right containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must not run a pod if a claim is not ready": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must retry NodePrepareResources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must unprepare resources for force-deleted pod": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet registers plugin": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] multiple drivers using both drav1alpha4 and drapbv1beta1 work": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] multiple drivers using only drapbv1alpha4 work": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] multiple drivers using only drapbv1beta1 work": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on multiple nodes with different ResourceSlices keeps pod pending because of CEL runtime errors": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on multiple nodes with node-local resources uses all resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node deletes generated claims when pod is done": " [Disabled:Alpha] 
[Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node does not delete generated claims when pod is restarting": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node must be possible for the driver to update the ResourceClaim.Status.Devices once allocated [Feature:DRAResourceClaimDeviceStatus]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node must deallocate after use": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node removes reservation from claim when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node retries pod scheduling after creating device class": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node retries pod scheduling after updating device class": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node runs a pod without a generated resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node supports claim and class parameters": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node supports external claim referenced by multiple containers of multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node supports external claim referenced by multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node supports init containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node supports inline claim referenced by multiple containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node supports reusing resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node supports sharing a claim concurrently": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node supports sharing a claim sequentially [Slow]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node supports simple pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] on single node supports simple pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] runs pod after driver starts": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPIHugePages] Downward API tests for hugepages should provide container's limits.hugepages- and requests.hugepages- as env vars": " [Suite:k8s]", + + "[sig-node] Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPIHugePages] Downward API tests for hugepages should provide default limits.hugepages- from node allocatable": " [Suite:k8s]", + + "[sig-node] Downward API should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Downward API should provide default limits.cpu/memory from node allocatable [NodeConformance] 
[Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Downward API should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Downward API should provide host IP as an env var [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Downward API should provide hostIPs as an env var [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Downward API should provide pod UID as env vars [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Downward API should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Ephemeral Containers [NodeConformance] should update the ephemeral containers in an existing pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Ephemeral Containers [NodeConformance] will start an ephemeral container in an existing pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Events should be sent by kubelets and the scheduler about pods scheduling and running ": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] ImageCredentialProvider [Feature:KubeletCredentialProviders] should be able to create pod with image credentials fetched from external credential provider ": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartAlways pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartNever pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] InitContainer [NodeConformance] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] InitContainer [NodeConformance] should not start app containers if init containers fail on a RestartAlways pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Kubelet [Serial] [Slow] experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking] resource tracking for 100 pods per node": " [Suite:k8s]", + + "[sig-node] Kubelet [Serial] [Slow] regular resource usage tracking [Feature:RegularResourceUsageTracking] resource tracking for 0 pods per node": " [Suite:k8s]", + + "[sig-node] Kubelet [Serial] [Slow] regular resource usage tracking [Feature:RegularResourceUsageTracking] resource tracking for 100 pods per node": " [Suite:k8s]", + + "[sig-node] Kubelet when scheduling a busybox command in a pod should print the output to logs [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Kubelet when scheduling a busybox command that always fails in a pod should be possible to delete [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Kubelet when scheduling a busybox command that always fails in a pod should have an terminated reason [NodeConformance] [Conformance]": " 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Kubelet when scheduling a read only busybox container should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Kubelet when scheduling an agnhost Pod with hostAliases should write entries to /etc/hosts [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Kubelet with pods in a privileged namespace when scheduling an agnhost Pod with hostAliases and hostNetwork should write entries to /etc/hosts when hostNetwork is enabled [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] KubeletManagedEtcHosts should test kubelet managed /etc/hosts file [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Lease lease API should be available [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Mount propagation should propagate mounts within defined scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds [Disruptive] [Conformance]": " [Skipped:SingleReplicaTopology] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] only evicts pods without tolerations from tainted nodes": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Single Pod [Serial] doesn't evict pod with tolerations from tainted nodes": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Single Pod [Serial] eventually evict pod with finite tolerations from tainted nodes": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Single Pod [Serial] evicts pods from tainted nodes": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Single Pod [Serial] pods evicted from tainted nodes have pod disruption condition": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Single Pod [Serial] removing taint cancels eviction [Disruptive] [Conformance]": " [Skipped:SingleReplicaTopology] [Suite:k8s]", + + "[sig-node] Node Lifecycle should run through the lifecycle of a node [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] NodeLease NodeLease should have OwnerReferences set": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] NodeLease NodeLease the kubelet should create and update a lease in the kube-node-lease namespace": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] NodeLease NodeLease the kubelet should report node status infrequently": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] NodeProblemDetector [NodeFeature:NodeProblemDetector] should run without error": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] pod-resize-limit-ranger-test": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] pod-resize-resource-quota-test": " [Disabled:Alpha] [Suite:k8s]", + + 
"[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] BestEffort QoS pod - empty resize": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] BestEffort pod - try requesting memory, expect error": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, mixed containers - add limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, mixed containers - add requests": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, mixed containers - scale up cpu and memory": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container - decrease CPU (RestartContainer) & memory (NotRequired)": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with CPU requests + limits, cpu requests - remove memory requests": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU limits only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and increase CPU limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and increase memory limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory limits only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and increase CPU limits": " [Disabled:Alpha] 
[Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and increase memory limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU limits only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and decrease CPU limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and decrease memory limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase memory limits only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and decrease CPU limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and decrease memory limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory 
requests + limits - increase memory requests only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - remove CPU limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - remove memory limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests - decrease memory request": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests - increase cpu request": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu requests and limits - resize with equivalents": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, one container with memory requests + limits, cpu requests - remove CPU requests": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - decrease c1 resources, increase c2 resources, no change for c3 (net increase for pod)": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - increase c1 resources, no change for c2, decrease c3 resources (no net change for pod)": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - no change for c1, increase c2 resources, decrease c3 (net decrease for pod)": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - decrease CPU & increase memory": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - decrease CPU & memory": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU & decrease memory": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU & memory with an extended resource": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] 
[Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU & memory": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU (NotRequired) & memory (RestartContainer)": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Serial] [Feature:InPlacePodVerticalScaling] [NodeAlphaFeature:InPlacePodVerticalScaling] Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2) ; decrease: CPU (c2), memory (c1,c3)": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod Level Resources [Serial] [Feature:PodLevelResources] [NodeAlphaFeature:PodLevelResources] Burstable QoS pod with container resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod Level Resources [Serial] [Feature:PodLevelResources] [NodeAlphaFeature:PodLevelResources] Burstable QoS pod, 1 container with resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod Level Resources [Serial] [Feature:PodLevelResources] [NodeAlphaFeature:PodLevelResources] Burstable QoS pod, no container resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod Level Resources [Serial] [Feature:PodLevelResources] [NodeAlphaFeature:PodLevelResources] Guaranteed QoS pod with container resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod Level Resources [Serial] [Feature:PodLevelResources] [NodeAlphaFeature:PodLevelResources] Guaranteed QoS pod, 1 container with resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod Level Resources [Serial] [Feature:PodLevelResources] [NodeAlphaFeature:PodLevelResources] Guaranteed QoS pod, no container resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod garbage collector [Feature:PodGarbageCollector] [Slow] should handle the creation of 1000 pods": " [Suite:k8s]", + + "[sig-node] PodOSRejection [NodeConformance] Kubelet [LinuxOnly] should reject pod when the node OS doesn't match pod's OS": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] PodRejectionStatus Kubelet should reject pod when the node didn't have enough resource": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-node] PodTemplates should delete a collection of pod templates [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] PodTemplates should replace a pod template [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] PodTemplates should run the lifecycle of PodTemplates [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods Extended Delete Grace Period should be submitted and removed": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods Extended Pod Container Status should never report container start when an init container fails": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods Extended Pod Container Status should never report success for a pending container": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods Extended Pod Container lifecycle evicted pods should be terminal": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods Extended Pod Container lifecycle should not create extra sandbox if all containers are done": " [Suite:openshift/conformance/parallel] [Suite:k8s]", 
+ + "[sig-node] Pods Extended Pod TerminationGracePeriodSeconds is negative pod with negative grace period": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods Extended Pods Set QOS Class should be set on Pods with matching resource requests and limits for memory and cpu [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should be submitted and removed [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should be updated [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should cap back-off at MaxContainerBackOff [Slow] [NodeConformance]": " [Suite:k8s]", + + "[sig-node] Pods should contain environment variables for services [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should delete a collection of pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should get a host IP [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should have their auto-restart back-off timer reset on image update [Slow] [NodeConformance]": " [Suite:k8s]", + + "[sig-node] Pods should patch a pod status [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should run through the lifecycle of Pods and PodStatus [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should support pod readiness gates [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods should support remote command execution over websockets [NodeConformance] [Conformance]": " [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should support retrieving logs from the container over websockets [NodeConformance] [Conformance]": " [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] PreStop graceful pod terminated should wait until preStop hook completes the process": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] PreStop should call prestop when killing a pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] PrivilegedPod [NodeConformance] should enable privileged commands [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should *not* be restarted by liveness probe because startup probe delays it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should *not* be restarted with a GRPC liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should 
*not* be restarted with a non-local redirect http liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should be ready immediately after startupProbe succeeds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should be restarted by liveness probe after startup probe enables it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should be restarted startup probe fails": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should be restarted with a GRPC liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should be restarted with a failing exec liveness probe that took longer than the timeout": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should be restarted with a local redirect http liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should override timeoutGracePeriodSeconds when LivenessProbe field is set [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should override timeoutGracePeriodSeconds when StartupProbe field is set [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] RuntimeClass should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]": " [Disabled:Broken] 
[Suite:k8s]", + + "[sig-node] RuntimeClass should reject a Pod requesting a RuntimeClass with conflicting node selector": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should reject a Pod requesting a non-existent RuntimeClass [NodeConformance] [Conformance]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling with taints [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling without taints ": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] RuntimeClass should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] RuntimeClass should support RuntimeClasses API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] SSH should SSH to all nodes and run commands": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] Secrets should be consumable as environment variable names when secret keys start with a digit [Feature:RelaxedEnvironmentVariableValidation]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Secrets should be consumable from pods in env vars [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Secrets should be consumable via the environment [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Secrets should fail to create secret due to empty secret key [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Secrets should patch a secret [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context When creating a container with runAsNonRoot should not run with an explicit root user ID [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a container with runAsNonRoot should not run without a specified user ID": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a container with runAsNonRoot should run with an explicit non-root user ID [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a container with runAsNonRoot should run with an image specified user ID": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a container with runAsUser should run the container with uid 0 [LinuxOnly] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a container with runAsUser should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] 
Security Context When creating a pod with HostUsers must create the user namespace if set to false [LinuxOnly] [Feature:UserNamespacesSupport]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with HostUsers must not create the user namespace if set to true [LinuxOnly] [Feature:UserNamespacesSupport]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with HostUsers should mount all volumes with proper permissions with hostUsers=false [LinuxOnly] [Feature:UserNamespacesSupport]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with HostUsers should set FSGroup to user inside the container with hostUsers=false [LinuxOnly] [Feature:UserNamespacesSupport]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with privileged should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with privileged should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context [sig-node] SupplementalGroupsPolicy [Feature:SupplementalGroupsPolicy] when SupplementalGroupsPolicy was not set if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context [sig-node] SupplementalGroupsPolicy [Feature:SupplementalGroupsPolicy] when SupplementalGroupsPolicy was set to Merge if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context [sig-node] SupplementalGroupsPolicy [Feature:SupplementalGroupsPolicy] when SupplementalGroupsPolicy was set to Strict even if the container's primary UID belongs to some groups in the image, it should not add SupplementalGroups to them [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context should support container.SecurityContext.RunAsUser [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support 
pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support seccomp default which is unconfined [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support seccomp runtime/default [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support seccomp unconfined on the container [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support seccomp unconfined on the pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support volume SELinux relabeling [Flaky] [LinuxOnly]": " [Suite:k8s]", + + "[sig-node] Security Context should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly]": " [Suite:k8s]", + + "[sig-node] Security Context should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly]": " [Suite:k8s]", + + "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when true [LinuxOnly] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context when if the container's primary UID belongs to some groups in the image [LinuxOnly] should add pod.Spec.SecurityContext.SupplementalGroups to them [LinuxOnly] in resultant supplementary groups for the container processes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should not launch unsafe, but not explicitly enabled sysctls on the node [MinimumKubeletVersion:1.21]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls [MinimumKubeletVersion:1.21] [Environment:NotInUserNS] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls with slashes as separator [MinimumKubeletVersion:1.23] [Environment:NotInUserNS]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] User Namespaces for Pod Security Standards [LinuxOnly] with UserNamespacesSupport and UserNamespacesPodSecurityStandards enabled should allow pod [Feature:UserNamespacesPodSecurityStandards]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Variable Expansion allow almost all printable ASCII characters as environment variable names [Feature:RelaxedEnvironmentVariableValidation]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Variable Expansion should allow composing env vars into new env vars [NodeConformance] [Conformance]": " 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Variable Expansion should allow substituting values in a container's args [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Variable Expansion should allow substituting values in a container's command [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Variable Expansion should allow substituting values in a volume subpath [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Variable Expansion should fail substituting values in a volume subpath with absolute path [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Variable Expansion should fail substituting values in a volume subpath with backticks [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Variable Expansion should succeed in writing subpaths in container [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Variable Expansion should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance]": " [Suite:k8s]", + + "[sig-node] [Feature:Example] Downward API should create a pod that prints his name and namespace": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [Feature:Example] Liveness liveness pods should be automatically restarted": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [Feature:Example] Secret should create a pod that reads a secret": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [Feature:GPUDevicePlugin] [Serial] Sanity test using nvidia-smi should run nvidia-smi and cuda-demo-suite": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] [Feature:GPUDevicePlugin] [Serial] Test using a Job should run gpu based jobs": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] [Feature:GPUDevicePlugin] [Serial] Test using a Pod should run gpu based matrix multiplication": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] [Feature:PodLifecycleSleepActionAllowZero] when create a pod with lifecycle hook using sleep action with a duration of zero seconds prestop hook using sleep action with zero duration": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action ignore terminated container": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action reduce GracePeriodSeconds during runtime": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action valid prestop hook using sleep action": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted by liveness probe because startup probe delays it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a /healthz http liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] 
[Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a GRPC liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a non-local redirect http liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a tcp:8080 liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be ready immediately after startupProbe succeeds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted by liveness probe after startup probe enables it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted startup probe fails": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a /healthz http liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a GRPC liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a exec \"cat /tmp/health\" liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a failing exec liveness probe that took longer than the timeout": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a local redirect http liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should have monotonically increasing restart count": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing 
restartable init container should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should override timeoutGracePeriodSeconds when LivenessProbe field is set": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should override timeoutGracePeriodSeconds when StartupProbe field is set": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container with readiness probe should not be ready before initial delay and never restart": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container with readiness probe that fails should never be ready and never restart": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart https hook properly [MinimumKubeletVersion:1.23]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop https hook properly [MinimumKubeletVersion:1.23]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [Serial] Pod InPlace Resize Container (scheduler-focused) [Feature:InPlacePodVerticalScaling] pod-resize-scheduler-tests": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] crictl should be able to run crictl on the node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] kubelet Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s.": " [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-node] kubelet host cleanup with volume mounts [HostCleanup] [Flaky] Host cleanup after 
disrupting NFS volume [NFS] after stopping the nfs-server and deleting the (active) client pod, the NFS mount and the pod's UID directory should be removed.": " [Suite:k8s]", + + "[sig-node] kubelet host cleanup with volume mounts [HostCleanup] [Flaky] Host cleanup after disrupting NFS volume [NFS] after stopping the nfs-server and deleting the (sleeping) client pod, the NFS mount and the pod's UID directory should be removed.": " [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] should return the Microsoft-Windows-Security-SPP logs with the pattern Health": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] should return the Microsoft-Windows-Security-SPP logs": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] should return the error with an empty --query option": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] should return the kubelet logs ": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] should return the kubelet logs for the current boot with the pattern container": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] should return the kubelet logs for the current boot": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] should return the kubelet logs since the current date and time": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] should return the last three lines of the Microsoft-Windows-Security-SPP logs": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] should return the last three lines of the kubelet logs": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] specific log stream [Feature:PodLogsQuerySplitStreams] kubectl get --raw /api/v1/namespaces/default/pods//log?stream": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied. 
[Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-scheduling] LimitRange should list, patch and delete a LimitRange by collection [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-scheduling] Multi-AZ Clusters should spread the pods of a replication controller across zones [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-scheduling] Multi-AZ Clusters should spread the pods of a service across zones [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] PodTopologySpread Filtering validates 4 pods with MaxSkew=1 are evenly distributed into 2 nodes": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates Pods with non-empty schedulingGates are blocked on scheduling": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates pod overhead is considered along with resource limits of pods that are allowed to run verify pod overhead is accounted for": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates resource limits of pods that are allowed to run [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeAffinity is respected if not matching": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if matching [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if not matching [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that required NodeAffinity setting is respected if matching": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if matching": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if not matching": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]": " [Slow] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that there is no conflict between pods with same hostPort but different hostIP and protocol": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] when PVC has node-affinity to non-existent/illegal nodes, the pod should be scheduled normally if suitable nodes exist": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] PodTopologySpread Preemption validates proper pods are preempted": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath runs ReplicaSets to verify preemption running path 
[Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints verify PriorityClass endpoints can be operated with different HTTP methods [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] validates pod disruption condition is added to the preempted pod [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] validates various priority Pods preempt expectedly with the async preemption [Feature:SchedulerAsyncPreemption]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-scheduling] SchedulerPriorities [Serial] Pod should be preferably scheduled to nodes pod can tolerate": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPriorities [Serial] Pod should be scheduled to node that don't match the PodAntiAffinity terms": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPriorities [Serial] PodTopologySpread Scoring validates pod should be preferably scheduled to node which makes the matching pods more evenly distributed": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Mock Node Volume Health [Feature:CSIVolumeHealth] [FeatureGate:CSIVolumeHealth] [Alpha] CSI Mock Node Volume Health [Slow] return normal volume stats with abnormal volume condition": " [Suite:k8s]", + + "[sig-storage] CSI Mock Node Volume Health [Feature:CSIVolumeHealth] [FeatureGate:CSIVolumeHealth] [Alpha] CSI Mock Node Volume Health [Slow] return normal volume stats without volume condition": " [Suite:k8s]", + + "[sig-storage] CSI Mock Node Volume Health [Feature:CSIVolumeHealth] [FeatureGate:CSIVolumeHealth] [Alpha] CSI Mock Node Volume Health [Slow] return normal volume stats": " [Suite:k8s]", + + "[sig-storage] CSI Mock fsgroup as mount option Delegate FSGroup to CSI driver [LinuxOnly] should not pass FSGroup to CSI driver if it is set in pod and driver supports VOLUME_MOUNT_GROUP": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock fsgroup as mount option Delegate FSGroup to CSI driver [LinuxOnly] should pass FSGroup to CSI driver if it is set in pod and driver supports VOLUME_MOUNT_GROUP": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Dynamic provisioning should honor pv delete reclaim policy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Dynamic provisioning should honor pv retain reclaim policy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Static provisioning should honor pv delete reclaim policy": 
" [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Static provisioning should honor pv retain reclaim policy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should add SELinux mount option to existing mount options [FeatureGate:SELinuxMountReadWriteOncePod] [Beta]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for CSI driver that does not support SELinux mount [FeatureGate:SELinuxMountReadWriteOncePod] [Beta]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for Pod without SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for RWO volume with SELinuxMount disabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not unstage RWO volume when starting a second pod with the same SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not unstage RWOP volume when starting a second pod with the same SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should pass SELinux mount option for RWO volume with SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should pass SELinux mount option for RWOP volume and Pod with SELinux context set [FeatureGate:SELinuxMountReadWriteOncePod] [Beta]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should unstage RWO volume when starting a second pod with different SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should unstage RWOP volume when starting a second pod with different SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount metrics 
SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different context on RWOP volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different context on RWX volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two Pods with the same context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] warning is bumped on two Pods with a different context on RWO volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] warning is not bumped on two Pods with the same context on RWO volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Mock volume attach CSI CSIDriver deployment after pod creation using non-attachable mock driver should bringup pod after deploying CSIDriver attach=false [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should not require VolumeAttach for drivers without attachment": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should preserve attachment policy when no CSIDriver present": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should require VolumeAttach for drivers with attachment": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should require VolumeAttach for ephemermal volume and drivers with attachment": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume by restarting pod if attach=off, nodeExpansion=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume by restarting pod if attach=on, nodeExpansion=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume without restarting pod if nodeExpansion=off": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI Volume expansion should not expand volume if resizingOnDriver=off, resizingOnSC=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI Volume expansion should not have staging_path missing in node expand volume pod if attach=on, nodeExpansion=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume 
expansion CSI online volume expansion should expand volume without restarting pod if attach=off, nodeExpansion=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI online volume expansion should expand volume without restarting pod if attach=on, nodeExpansion=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI online volume expansion with secret should expand volume without restarting pod if attach=on, nodeExpansion=on, csiNodeExpandSecret=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should be possible for node-only expanded volumes with final error": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should be possible for node-only expanded volumes with infeasible error": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should not be possible in partially expanded volumes": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should allow recovery if controller expansion fails with final error": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should allow recovery if controller expansion fails with infeasible error": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should record target size in allocated resources": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should not update fsGroup if update from File to None": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should not update fsGroup if update from detault to None": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from File to default": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from None to File": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from None to default": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from detault to File": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=File": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=default": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock 
volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should not modify fsGroup if fsGroupPolicy=None": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit for generic ephemeral volume when persistent volume is attached [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit for persistent volume when generic ephemeral volume is attached [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit when limit is bigger than 0 [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should call NodeUnstage after NodeStage ephemeral error": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should call NodeUnstage after NodeStage success": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should not call NodeUnstage after NodeStage final error": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should retry NodeStage after NodeStage ephemeral error": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should retry NodeStage after NodeStage final error": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] should call NodeStage after NodeUnstage success": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] two pods: should call NodeStage after previous NodeUnstage final error": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] two pods: should call NodeStage after previous NodeUnstage transient error": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should be plumbed down when csiServiceAccountTokenEnabled=true": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should not be plumbed down when CSIDriver is not deployed": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should not be plumbed down when csiServiceAccountTokenEnabled=false": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume snapshot CSI Snapshot Controller metrics [Feature:VolumeSnapshotDataSource] snapshot controller should emit dynamic CreateSnapshot, CreateSnapshotAndReady, and DeleteSnapshot metrics": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume snapshot CSI Snapshot Controller metrics [Feature:VolumeSnapshotDataSource] snapshot controller should emit pre-provisioned CreateSnapshot, CreateSnapshotAndReady, and DeleteSnapshot metrics": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume snapshot CSI Volume Snapshots [Feature:VolumeSnapshotDataSource] volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] CSI Mock volume snapshot CSI Volume Snapshots secrets [Feature:VolumeSnapshotDataSource] volume snapshot create/delete with secrets": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity disabled": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity unused": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, have capacity": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, insufficient capacity": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, no capacity": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, immediate binding": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, late binding, no topology": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, late binding, with topology": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity storage capacity unlimited": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock workload info CSI PodInfoOnMount Update should be passed when update from false to true": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock workload info CSI PodInfoOnMount Update should not be passed when update from true to false": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock workload info CSI workload information using mock driver contain ephemeral=true when using inline volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock workload info CSI workload information using mock driver should be passed when podInfoOnMount=true": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock workload info CSI workload information using mock driver should not be passed when CSIDriver does not exist": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock workload info CSI workload information using mock driver should not be passed when podInfoOnMount=false": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI 
Ephemeral-volume (default fs)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block 
volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] pvc-deletion-performance should delete volumes at scale within performance constraints [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] 
[FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed 
via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should block a second pod from using an in-use ReadWriteOncePod volume on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should preempt lower priority pods using ReadWriteOncePod volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV 
(default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without 
AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only 
volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the 
same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volume-lifecycle-performance should provision volumes at scale within performance constraints [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is 
using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should 
support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": 
" [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot 
controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) 
(immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] 
[LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI 
Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: 
Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a 
different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: 
csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] 
[Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] 
should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] pvc-deletion-performance should delete volumes at scale within performance constraints [Slow] [Serial]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: 
Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC": " [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC": " [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC": " [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod 
created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning 
should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should block a second pod from using an in-use ReadWriteOncePod volume on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should preempt lower priority pods using ReadWriteOncePod volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: 
Dynamic PV (default fs)] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC": " [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC": " [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC": " [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " 
[Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] 
[Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] 
CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volume-lifecycle-performance should provision volumes at scale within performance constraints [Slow] [Serial]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Skipped:gce] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] 
subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC": " [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC": " [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC": " [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI 
Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] 
[Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume 
snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath 
should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] 
[Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from 
pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Skipped:gce] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from 
pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] 
[Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSIInlineVolumes should run through the lifecycle of a CSIDriver [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] CSIInlineVolumes should support CSIVolumeSource in Pod API [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] CSINodes CSI Conformance should run through the lifecycle of a csinode [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] CSIStorageCapacity should support CSIStorageCapacities API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap Should fail non-optional pod creation due to configMap object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] ConfigMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] ConfigMap binary data should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap optional updates should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]": " 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be immutable if `immutable` field is set [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API [Serial] [Disruptive] [Feature:EphemeralStorage] Downward API tests for local ephemeral storage should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars": " [Suite:k8s]", + + "[sig-storage] Downward API [Serial] [Disruptive] [Feature:EphemeralStorage] Downward API tests for local ephemeral storage should provide default limits.ephemeral-storage from node allocatable": " [Suite:k8s]", + + "[sig-storage] Downward API volume should provide container's cpu limit [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide container's cpu request [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide container's memory limit [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide container's memory request [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide podname only [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should update annotations on modification [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should update labels on modification [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should be disabled by changing the default annotation [Serial] [Disruptive]": " [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should be disabled by removing the default annotation [Serial] [Disruptive]": " [Suite:k8s]", + + 
"[sig-storage] Dynamic Provisioning DynamicProvisioner Default should create and delete default persistent volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner External should let an external dynamic provisioner create and delete persistent volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] deletion should be idempotent": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should provision storage with different parameters": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should provision storage with non-default reclaim policy Retain": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should test that deleting a claim before the volume is provisioned deletes the volume.": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning Invalid AWS KMS key should report an error and create no PV": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes pod should support memory backed volumes of specified size": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes pod should support shared volumes between containers [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] 
[Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] files with FSGroup ownership should support (root,0644,tmpfs)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is non-root": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is root": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] nonexistent volume subPath should have the correct mode and owner using FSGroup": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] volume on default medium should have the correct mode using FSGroup": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] volume on tmpfs should have the correct mode using FSGroup": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for configmaps [Serial] [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for git_repo [Serial] [Slow]": " [Suite:k8s]", + + "[sig-storage] EmptyDir wrapper volumes should not conflict [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : configmap": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : projected": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : secret": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Flexvolumes should be mountable when attachable [Feature:Flexvolumes]": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Flexvolumes should be mountable when non-attachable": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a file written to the mount before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] GenericPersistentVolume [Disruptive] When 
kubelet restarts Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] HostPath should give a volume the correct mode [LinuxOnly] [NodeConformance]": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] HostPath should support r/w [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] HostPath should support subPath [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathBlockDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathUnset": " [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathCharDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathDirectory": " [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathFile": " [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathSocket": " [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting non-existent block device 'does-not-exist-blk-dev' when HostPathType is HostPathBlockDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should be able to mount character device 'achardev' successfully when HostPathType is HostPathCharDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should be able to mount character device 'achardev' successfully when HostPathType is HostPathUnset": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathBlockDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathDirectory": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathFile": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathSocket": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting non-existent character device 'does-not-exist-char-dev' when HostPathType is HostPathCharDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should be able to mount directory 'adir' successfully when HostPathType is HostPathDirectory": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should be able to mount directory 'adir' successfully when HostPathType is HostPathUnset": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when 
HostPathType is HostPathBlockDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathCharDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathFile": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathSocket": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should fail on mounting non-existent directory 'does-not-exist-dir' when HostPathType is HostPathDirectory": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should be able to mount file 'afile' successfully when HostPathType is HostPathFile": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should be able to mount file 'afile' successfully when HostPathType is HostPathUnset": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathBlockDev": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathCharDev": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathDirectory": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathSocket": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should fail on mounting non-existent file 'does-not-exist-file' when HostPathType is HostPathFile": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should be able to mount socket 'asocket' successfully when HostPathType is HostPathSocket": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should be able to mount socket 'asocket' successfully when HostPathType is HostPathUnset": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should fail on mounting non-existent socket 'does-not-exist-socket' when HostPathType is HostPathSocket": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathBlockDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathCharDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathDirectory": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathFile": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive 
[Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should 
provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] 
[Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and 
retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with 
different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] 
[Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", 
+ + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the 
single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] 
subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block 
volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support 
readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
+ "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume 
[Slow] should concurrently access the single volume from pods on different node": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes 
repeatedly [Slow] [Serial]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents 
ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple 
subpath from same volumes [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:RebaseInProgress] [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:RebaseInProgress] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by
a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] 
[Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently 
access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] 
[Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] 
[Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: 
Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] 
[Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext4)] 
volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] 
[Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default 
fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV 
(default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] 
[Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single 
volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV 
(ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] 
subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down 
[Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) 
(late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] 
[Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written 
before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] 
[LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write 
files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block 
volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity 
information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (immediate binding)] topology should 
fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain 
data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: 
Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should 
support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on 
different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should 
access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] 
should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] 
subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access 
the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] 
ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", 
+ + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map 
unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] 
[Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: (delete policy)] volumegroupsnapshottable 
[Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:Broken] [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node 
[LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:Broken] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:Broken] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive 
[Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the 
volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] 
should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath 
should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node 
[LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs 
created for ephemeral pvcs": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " 
[Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV 
(block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath 
should support restarting containers using directory as subpath [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: 
Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the 
same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV 
(default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath 
should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should 
concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV 
(filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic 
Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath 
[Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet 
is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] 
[Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default 
fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: 
Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: 
Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not 
allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] 
[Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": 
" [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same 
node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should 
access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the 
volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] 
[Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support 
expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] 
[Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force 
deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different 
node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: 
Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath 
should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: 
Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] 
disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: 
Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting 
containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should 
concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should 
support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] 
[Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on 
different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if 
subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without 
AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] 
fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force 
deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the 
volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] 
[Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if 
pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem 
volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume 
from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] 
[Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after 
the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different 
volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple 
subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should 
create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume 
definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a 
new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] 
[Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], 
pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " 
[Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] 
multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] 
disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] 
[Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning 
should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline 
ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] 
subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV 
(block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting 
containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across 
pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision 
storage with pvc data source in parallel [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] 
[Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] 
[Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV 
(ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should 
access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod 
which has topologies that conflict with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV 
(ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if 
non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support 
restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) 
(immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: 
Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " 
[Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods 
on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single 
file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] 
VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail 
to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes 
to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] 
[LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
tmpfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic 
PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single 
read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic 
Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of 
various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should 
access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple 
volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV 
pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] 
volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: 
Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default 
fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same 
volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV 
(ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source 
(ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) 
(late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (xfs)] [Slow] 
volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / 
map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down 
[Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod 
recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block 
volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] capacity 
provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on 
different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] 
[Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV 
(ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume 
(default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] 
subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: vsphere] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] 
[Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] 
[Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] Mounted volume expand [Feature:StorageProvider] Should verify mounted devices can be resized": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kube-controller-manager restarts should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash": " [Serial] [Suite:k8s]", + + "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a file written to the mount before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] PV Protection Verify \"immediate\" deletion of a PV that is not bound to a PVC": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PV Protection Verify that PV bound to a PVC is not removed immediately": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PVC Protection Verify \"immediate\" deletion of a PVC that is not in active use by a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PVC Protection Verify that PVC in active use by a pod is not removed immediately": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PVC Protection Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes CSI Conformance should apply changes to a pv/pvc status [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] PersistentVolumes CSI Conformance should run through the lifecycle of a PV and a PVC [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] PersistentVolumes Default StorageClass [LinuxOnly] pods that use multiple volumes should be reschedulable [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS when invoking the Recycle reclaim policy should test that a PV becomes Available and is clean after the PVC is deleted.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test phase transition timestamp is set": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test phase transition timestamp multiple updates": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test write access": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV: test phase transition timestamp is set and phase is Available": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and a pre-bound PV: test write access": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and non-pre-bound PV: test write access": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs should create a non-pre-bound PV and PVC: test write access ": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 2 PVs and 4 PVCs: test write access": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 3 PVs and 3 PVCs: test write access": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 4 PVs and 2 PVCs: test write access [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-expansion loopback local block volume should support online expansion on node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to non-existent path": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to wrong node": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeAffinity": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeSelector": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod has affinity": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod management is parallel and pod has affinity": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes spread across nodes when pod has anti-affinity": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes spread across nodes when pod management is parallel and pod has anti-affinity": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local Stress with local volumes [Serial] should be able to process many pods and reuse local volumes": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " 
[Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + 
"[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume 
type: gce-localssd-scsi-fs] [Serial] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected combined should project all components that make up the projection API [Projection] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap Should fail non-optional pod creation due to configMap object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] Projected configMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] Projected configMap optional updates should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume with mappings and 
Item mode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap updates should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide container's cpu limit [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide container's cpu request [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide container's memory limit [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide container's memory request [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide podname only [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should update annotations on modification [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should update labels on modification [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret Should fail non-optional pod creation due to secret object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] Projected secret Should fail non-optional pod creation due to the key in the 
secret object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] Projected secret optional updates should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected secret should be consumable from pods in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret should be consumable from pods in volume with mappings [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Retroactive StorageClass Assignment should assign default StorageClass to PVCs retroactively [Disruptive] [Serial]": " [Suite:k8s]", + + "[sig-storage] Secrets Should fail non-optional pod creation due to secret object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] Secrets Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] Secrets optional updates should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be consumable from pods in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be consumable from pods in volume with mappings [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be consumable in multiple volumes in a pod 
[NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be immutable if `immutable` field is set [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] StaticPods [Feature:Kind] should run after kubelet stopped with CSI volume mounted [Disruptive] [Serial]": " [Suite:k8s]", + + "[sig-storage] StorageClasses CSI Conformance should run through the lifecycle of a StorageClass [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod with mountPath of existing file [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Subpath Atomic writer volumes should support subpaths with downward pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Subpath Atomic writer volumes should support subpaths with projected pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Subpath Atomic writer volumes should support subpaths with secret pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Subpath Container restart should verify that container can restart successfully after configmaps modified": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] VolumeAttachment Conformance should apply changes to a volumeattachment status [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] VolumeAttachment Conformance should run through the lifecycle of a VolumeAttachment [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] VolumeAttributesClass [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should run through the lifecycle of a VolumeAttributesClass": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] Volumes ConfigMap should be mountable": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Volumes NFSv3 should be mountable for NFSv3": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Volumes NFSv4 should be mountable for NFSv4": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] [Feature:Flexvolumes] Detaching volumes should not work when mount is in progress [Slow]": " [Suite:k8s]", + + "[sig-storage] [Feature:Flexvolumes] Mounted flexvolume expand [Slow] Should verify mounted flex volumes can be resized": " [Suite:k8s]", + + "[sig-storage] [Feature:Flexvolumes] Mounted flexvolume volume expand [Slow] should be resizable when mounted": " [Suite:k8s]", + + "[sig-storage] [Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [LinuxOnly] NonGracefulNodeShutdown [NonGracefulNodeShutdown] pod that uses a persistent volume via gce pd driver should get immediately rescheduled to a different node after non graceful node shutdown ": " [Serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create metrics for total number of volumes in A/D Controller": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create metrics for total time taken in volume 
operations in P/V Controller": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create prometheus metrics for volume provisioning and attach/detach": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create prometheus metrics for volume provisioning errors [Slow]": " [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics in Volume Manager": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics with the correct BlockMode PVC ref": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics with the correct FilesystemMode PVC ref": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create metrics for total number of volumes in A/D Controller": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create metrics for total time taken in volume operations in P/V Controller": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create prometheus metrics for volume provisioning and attach/detach": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create prometheus metrics for volume provisioning errors [Slow]": " [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create volume metrics in Volume Manager": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create volume metrics with the correct BlockMode PVC ref": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create volume metrics with the correct FilesystemMode PVC ref": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVController should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVController should create bound pv/pvc count metrics for pvc controller with volume attributes class dimension after creating both pv and pvc [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVController should create none metrics for pvc controller before creating any PV or PVC": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVController should create total pv count metrics for with plugin and volume mode labels after creating pv": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVController should create unbound pv count metrics for pvc controller after creating pv only": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVController should create unbound pvc count metrics for pvc controller after creating pvc only": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVController should create unbound pvc count metrics for pvc controller with volume attributes class dimension after creating pvc only [Feature:VolumeAttributesClass] 
[FeatureGate:VolumeAttributesClass] [Beta]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-windows] Hybrid cluster network for all supported CNIs should have stable networking for Linux and Windows pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] Hybrid cluster network for all supported CNIs should provide Internet connection and DNS for Windows containers [Feature:Networking-IPv4] [Feature:Networking-DNS]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] Hybrid cluster network for all supported CNIs should provide Internet connection for Linux containers [Feature:Networking-IPv4]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] Services should be able to create a functioning NodePort service for Windows": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:GPUDevicePlugin] Device Plugin should be able to create a functioning device plugin for Windows": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers container command path validation": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers container stats validation": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers metrics should report count of started and failed to start HostProcess containers": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should run as a process on the host/node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should run as localgroup accounts": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support init containers": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support querying api-server using in-cluster config": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support various volume mount types": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHyperVContainers] HyperV containers should start a hyperv isolated container": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Cpu Resources [Serial] Container limits should not be exceeded after waiting 2 minutes": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] DNS should support configurable pod DNS servers": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Density [Serial] [Slow] create a batch of pods latency/resource should be within limit when create 10 pods with 0s interval": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Eviction [Serial] [Slow] [Disruptive] should evict a pod when a node experiences memory pressure": " [Suite:k8s]", + + "[sig-windows] 
[Feature:Windows] GMSA Full [Serial] [Slow] GMSA support can read and write file to remote SMB folder": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] GMSA Full [Serial] [Slow] GMSA support works end to end": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] GMSA Kubelet [Slow] kubelet GMSA support when creating a pod with correct GMSA credential specs passes the credential specs down to the Pod's containers": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] GracefulNodeShutdown [Serial] [Disruptive] [Slow] should be able to gracefully shutdown pods with various grace periods": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Kubelet-Stats Kubelet stats collection for Windows nodes when running 3 pods should return within 10 seconds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Kubelet-Stats Kubelet stats collection for Windows nodes when windows is booted should return bootid within 10 seconds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Kubelet-Stats [Serial] Kubelet stats collection for Windows nodes when running 10 pods should return within 10 seconds": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Memory Limits [Serial] [Slow] Allocatable node memory should be equal to a calculated allocatable memory value": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Memory Limits [Serial] [Slow] attempt to deploy past allocatable memory limits should fail deployments of pods once there isn't enough memory": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should be able create pods and run containers with a given username": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should be able to create pod and run containers": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should ignore Linux Specific SecurityContext if set": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with containers running as CONTAINERADMINISTRATOR when runAsNonRoot is true": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with containers running as ContainerAdministrator when runAsNonRoot is true": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with unknown usernames at Container level": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with unknown usernames at Pod level": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should override SecurityContext username if set": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Windows volume mounts check volume mount permissions container should have readOnly permissions on emptyDir": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Windows volume mounts check volume mount permissions container should have readOnly permissions on hostMapPath": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] [Excluded:WindowsDocker] 
[MinimumKubeletVersion:1.22] RebootHost containers [Serial] [Disruptive] [Slow] should run as a reboot process on the host/node": " [Suite:k8s]", +} + +func init() { + ginkgo.GetSuite().SetAnnotateFn(func(name string, node types.TestSpec) { + if newLabels, ok := Annotations[name]; ok { + node.AppendText(newLabels) + } else { + panic(fmt.Sprintf("unable to find test %s", name)) + } + }) +} diff --git a/openshift-hack/e2e/annotate/rules.go b/openshift-hack/e2e/annotate/rules.go new file mode 100644 index 0000000000000..27f16410e5fe1 --- /dev/null +++ b/openshift-hack/e2e/annotate/rules.go @@ -0,0 +1,442 @@ +package annotate + +import ( + // ensure all the ginkgo tests are loaded + _ "k8s.io/kubernetes/openshift-hack/e2e" +) + +var ( + TestMaps = map[string][]string{ + // alpha features that are not gated + "[Disabled:Alpha]": { + `\[Feature:StorageVersionAPI\]`, + `\[Feature:InPlacePodVerticalScaling\]`, + `\[Feature:ServiceCIDRs\]`, + `\[Feature:ClusterTrustBundle\]`, + `\[Feature:SELinuxMount\]`, + `\[FeatureGate:SELinuxMount\]`, + `\[Feature:UserNamespacesPodSecurityStandards\]`, + `\[Feature:UserNamespacesSupport\]`, // disabled Beta + `\[Feature:DynamicResourceAllocation\]`, + `\[Feature:VolumeAttributesClass\]`, // disabled Beta + `\[sig-cli\] Kubectl client Kubectl prune with applyset should apply and prune objects`, // Alpha feature since k8s 1.27 + // 4.19 + `\[Feature:PodLevelResources\]`, + `\[Feature:SchedulerAsyncPreemption\]`, + `\[Feature:RelaxedDNSSearchValidation\]`, + `\[Feature:PodLogsQuerySplitStreams\]`, + `\[Feature:PodLifecycleSleepActionAllowZero\]`, + }, + // tests for features that are not implemented in openshift + "[Disabled:Unimplemented]": { + `Monitoring`, // Not installed, should be + `Cluster level logging`, // Not installed yet + `Kibana`, // Not installed + `Ubernetes`, // Can't set zone labels today + `kube-ui`, // Not installed by default + `Kubernetes Dashboard`, // Not installed by default (also probably slow image pull) + `should proxy to cadvisor`, // we don't expose cAdvisor port directly for security reasons + `\[Feature:BootstrapTokens\]`, // we don't serve cluster-info configmap + `\[Feature:KubeProxyDaemonSetMigration\]`, // upgrades are run separately + `\[Feature:BoundServiceAccountTokenVolume\]`, // upgrades are run separately + `\[Feature:StatefulUpgrade\]`, // upgrades are run separately + }, + // tests that rely on special configuration that we do not yet support + "[Disabled:SpecialConfig]": { + // GPU node needs to be available + `\[Feature:GPUDevicePlugin\]`, + `\[sig-scheduling\] GPUDevicePluginAcrossRecreate \[Feature:Recreate\]`, + + `\[Feature:LocalStorageCapacityIsolation\]`, // relies on a separate daemonset? 
+ `\[sig-cloud-provider-gcp\]`, // these tests require a different configuration - note that GCE tests from the sig-cluster-lifecycle were moved to the sig-cloud-provider-gcp; see https://github.com/kubernetes/kubernetes/commit/0b3d50b6dccdc4bbd0b3e411c648b092477d79ac#diff-3b1910d08fb8fd8b32956b5e264f87cb + + `kube-dns-autoscaler`, // Don't run kube-dns + `should check if Kubernetes master services is included in cluster-info`, // Don't run kube-dns + `DNS configMap`, // this tests dns federation configuration via configmap, which we don't support yet + + `NodeProblemDetector`, // requires a non-master node to run on + `Advanced Audit should audit API calls`, // expects to be able to call /logs + + `Firewall rule should have correct firewall rules for e2e cluster`, // Upstream-install specific + + // https://bugzilla.redhat.com/show_bug.cgi?id=2079958 + `\[sig-network\] \[Feature:Topology Hints\] should distribute endpoints evenly`, + + // These tests require SSH configuration and are part of the parallel suite, which does not create the bastion + // host. Enabling them would result in the bastion being created for every parallel test execution. + // Given that we have existing oc and WMCO tests that cover this functionality, we can safely disable it. + `\[Feature:NodeLogQuery\]`, + }, + // tests that are known broken and need to be fixed upstream or in openshift + // always add an issue here + "[Disabled:Broken]": { + `mount an API token into pods`, // We add 6 secrets, not 1 + `ServiceAccounts should ensure a single API token exists`, // We create lots of secrets + `unchanging, static URL paths for kubernetes api services`, // the test needs to exclude URLs that are not part of conformance (/logs) + `Services should be able to up and down services`, // we don't have wget installed on nodes + `KubeProxy should set TCP CLOSE_WAIT timeout`, // the test requires communication to port 11302 in the cluster nodes + `should check kube-proxy urls`, // previously this test was skipped because we reported -1 as the number of nodes; now we report the proper number and the test fails + `SSH`, // TRIAGE + `should implement service.kubernetes.io/service-proxy-name`, // this is an optional test that requires SSH.
sig-network + `recreate nodes and ensure they function upon restart`, // https://bugzilla.redhat.com/show_bug.cgi?id=1756428 + `\[Driver: iscsi\]`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711627 + + "RuntimeClass should reject", + + `Services should implement service.kubernetes.io/headless`, // requires SSH access to function, needs to be refactored + `ClusterDns \[Feature:Example\] should create pod that uses dns`, // doesn't use bindata, not part of kube test binary + `Simple pod should return command exit codes should handle in-cluster config`, // kubectl cp doesn't work or is not preserving executable bit, we have this test already + + // TODO(node): configure the cri handler for the runtime class to make this work + "should run a Pod requesting a RuntimeClass with a configured handler", + "should reject a Pod requesting a RuntimeClass with conflicting node selector", + "should run a Pod requesting a RuntimeClass with scheduling", + + // A fix is in progress: https://github.com/openshift/origin/pull/24709 + `Multi-AZ Clusters should spread the pods of a replication controller across zones`, + + // Upstream assumes all control plane pods are in kube-system namespace and we should revert the change + // https://github.com/kubernetes/kubernetes/commit/176c8e219f4c7b4c15d34b92c50bfa5ba02b3aba#diff-28a3131f96324063dd53e17270d435a3b0b3bd8f806ee0e33295929570eab209R78 + "MetricsGrabber should grab all metrics from a Kubelet", + "MetricsGrabber should grab all metrics from API server", + "MetricsGrabber should grab all metrics from a ControllerManager", + "MetricsGrabber should grab all metrics from a Scheduler", + + // https://bugzilla.redhat.com/show_bug.cgi?id=1906808 + `ServiceAccounts should support OIDC discovery of service account issuer`, + + // NFS umount is broken in kernels 5.7+ + // https://bugzilla.redhat.com/show_bug.cgi?id=1854379 + `\[sig-storage\].*\[Driver: nfs\] \[Testpattern: Dynamic PV \(default fs\)\].*subPath should be able to unmount after the subpath directory is deleted`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1986306 + `\[sig-cli\] Kubectl client kubectl wait should ignore not found error with --for=delete`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1980141 + `Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector`, + `Netpol NetworkPolicy between server and client should enforce policy to allow traffic from pods within server namespace based on PodSelector`, + `Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions`, + `Netpol NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions`, + `Netpol NetworkPolicy between server and client should enforce policy based on PodSelector or NamespaceSelector`, + `Netpol NetworkPolicy between server and client should deny ingress from pods on other namespaces`, + `Netpol NetworkPolicy between server and client should enforce updated policy`, + `Netpol NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors`, + `Netpol NetworkPolicy between server and client should enforce policy based on any PodSelectors`, + `Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector`, + `Netpol \[LinuxOnly\] NetworkPolicy between server and client using 
UDP should support a 'default-deny-ingress' policy`, + `Netpol \[LinuxOnly\] NetworkPolicy between server and client using UDP should enforce policy based on Ports`, + `Netpol \[LinuxOnly\] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector`, + + `Topology Hints should distribute endpoints evenly`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1908645 + `\[sig-network\] Networking Granular Checks: Services should function for service endpoints using hostNetwork`, + `\[sig-network\] Networking Granular Checks: Services should function for pod-Service\(hostNetwork\)`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1952460 + `\[sig-network\] Firewall rule control plane should not expose well-known ports`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1988272 + `\[sig-network\] Networking should provide Internet connection for containers \[Feature:Networking-IPv6\]`, + `\[sig-network\] Networking should provider Internet connection for containers using DNS`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1957894 + `\[sig-node\] Container Runtime blackbox test when running a container with a new image should be able to pull from private registry with secret`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1952457 + `\[sig-node\] crictl should be able to run crictl on the node`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1953478 + `\[sig-storage\] Dynamic Provisioning Invalid AWS KMS key should report an error and create no PV`, + + // https://issues.redhat.com/browse/OCPBUGS-34577 + `\[sig-storage\] Multi-AZ Cluster Volumes should schedule pods in the same zones as statically provisioned PVs`, + + // https://issues.redhat.com/browse/OCPBUGS-34594 + `\[sig-node\] \[Feature:PodLifecycleSleepAction\] when create a pod with lifecycle hook using sleep action valid prestop hook using sleep action`, + + // https://issues.redhat.com/browse/OCPBUGS-38839 + `\[sig-network\] \[Feature:Traffic Distribution\] when Service has trafficDistribution=PreferClose should route traffic to an endpoint that is close to the client`, + }, + // tests that need to be temporarily disabled while the rebase is in progress. 
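 + // (Note: each string in these maps is treated as a regular expression and matched against the full ginkgo test name, which is why bracketed tags such as [sig-node] appear escaped as `\[sig-node\]` throughout.)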
+ "[Disabled:RebaseInProgress]": { + // https://issues.redhat.com/browse/OCPBUGS-7297 + `DNS HostNetwork should resolve DNS of partial qualified names for services on hostNetwork pods with dnsPolicy`, + + // https://issues.redhat.com/browse/OCPBUGS-45275 + `\[sig-network\] Connectivity Pod Lifecycle should be able to connect to other Pod from a terminating Pod`, + + // https://issues.redhat.com/browse/OCPBUGS-17194 + `\[sig-node\] ImageCredentialProvider \[Feature:KubeletCredentialProviders\] should be able to create pod with image credentials fetched from external credential provider`, + + // https://issues.redhat.com/browse/OCPBUGS-45214 + // Even though this feature is not GA in k/k, it will be GA in OCP 4.19, so we should fix it and unskip this test + `\[Feature:volumegroupsnapshot\]`, + + // https://issues.redhat.com/browse/OCPBUGS-45273 + `\[sig-network\] Services should implement NodePort and HealthCheckNodePort correctly when ExternalTrafficPolicy changes`, + + // https://issues.redhat.com/browse/OCPBUGS-45273 + `\[sig-cli\] Kubectl Port forwarding Shutdown client connection while the remote stream is writing data to the port-forward connection port-forward should keep working after detect broken connection`, + + // https://issues.redhat.com/browse/OCPBUGS-45274 + // https://github.com/kubernetes/kubernetes/issues/129056 + `\[sig-node\] PodRejectionStatus Kubelet should reject pod when the node didn't have enough resource`, + + // https://issues.redhat.com/browse/OCPBUGS-45359 + `\[Feature:RecoverVolumeExpansionFailure\]`, + + // https://issues.redhat.com/browse/OCPBUGS-46477 + `\[sig-storage\] In-tree Volumes \[Driver: azure-file\]`, + }, + // tests that may work, but we don't support them + "[Disabled:Unsupported]": { + `\[Driver: rbd\]`, // OpenShift 4.x does not support Ceph RBD (use CSI instead) + `\[Driver: ceph\]`, // OpenShift 4.x does not support CephFS (use CSI instead) + `\[Driver: gluster\]`, // OpenShift 4.x does not support Gluster + `Volumes GlusterFS`, // OpenShift 4.x does not support Gluster + `GlusterDynamicProvisioner`, // OpenShift 4.x does not support Gluster + + // Skip vSphere-specific storage tests. The standard in-tree storage tests for vSphere + // (prefixed with `In-tree Volumes [Driver: vsphere]`) are enough for testing this plugin. 
+ // https://bugzilla.redhat.com/show_bug.cgi?id=2019115 + `\[sig-storage\].*\[Feature:vsphere\]`, + // Also, our CI doesn't support topology, so disable those tests + `\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(delayed binding\)\] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies`, + `\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(delayed binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`, + `\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(immediate binding\)\] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies`, + `\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(immediate binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`, + }, + // tests too slow to be part of conformance + "[Slow]": { + `\[sig-scalability\]`, // disable from the default set for now + `should create and stop a working application`, // Inordinately slow tests + + `\[Feature:PerformanceDNS\]`, // very slow + + `validates that there exists conflict between pods with same hostPort and protocol but one using 0\.0\.0\.0 hostIP`, // 5m, really? + }, + // tests that are known flaky + "[Flaky]": { + `Job should run a job to completion when tasks sometimes fail and are not locally restarted`, // seems flaky, also may require too many resources + // TODO(node): test works when run alone, but not in the suite in CI + `\[Feature:HPA\] Horizontal pod autoscaling \(scale resource: CPU\) \[sig-autoscaling\] ReplicationController light Should scale from 1 pod to 2 pods`, + }, + // tests that must be run without competition + "[Serial]": { + `\[Disruptive\]`, + `\[Feature:Performance\]`, // requires isolation + + `Service endpoints latency`, // requires low latency + `Clean up pods on node`, // schedules up to max pods per node + `DynamicProvisioner should test that deleting a claim before the volume is provisioned deletes the volume`, // test is very disruptive to other tests + + `Should be able to support the 1\.7 Sample API Server using the current Aggregator`, // down apiservices break other clients today https://bugzilla.redhat.com/show_bug.cgi?id=1623195 + + `\[Feature:HPA\] Horizontal pod autoscaling \(scale resource: CPU\) \[sig-autoscaling\] ReplicationController light Should scale from 1 pod to 2 pods`, + + `should prevent Ingress creation if more than 1 IngressClass marked as default`, // https://bugzilla.redhat.com/show_bug.cgi?id=1822286 + + `\[sig-network\] IngressClass \[Feature:Ingress\] should set default value on new IngressClass`, //https://bugzilla.redhat.com/show_bug.cgi?id=1833583 + }, + // Tests that don't pass on disconnected, either due to requiring + // internet access for GitHub (e.g. 
many of the s2i builds), or + // because of pullthrough not supporting ICSP (https://bugzilla.redhat.com/show_bug.cgi?id=1918376) + "[Skipped:Disconnected]": { + // Internet access required + `\[sig-network\] Networking should provide Internet connection for containers`, + }, + "[Skipped:alibabacloud]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:aws]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[sig-network\] LoadBalancers \[Feature:LoadBalancer\] .* UDP`, + `\[sig-network\] LoadBalancers \[Feature:LoadBalancer\] .* session affinity`, + }, + "[Skipped:azure]": { + "Networking should provide Internet connection for containers", // Azure does not allow ICMP traffic to internet. + // Azure CSI migration changed how we treat regions without zones. + // See https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=2066865 + `\[sig-storage\] In-tree Volumes \[Driver: azure-disk\] \[Testpattern: Dynamic PV \(immediate binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`, + `\[sig-storage\] In-tree Volumes \[Driver: azure-disk\] \[Testpattern: Dynamic PV \(delayed binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`, + }, + "[Skipped:baremetal]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:gce]": { + // Requires creation of a different compute instance in a different zone and is not compatible with volumeBindingMode of WaitForFirstConsumer which we use in 4.x + `\[sig-storage\] Multi-AZ Cluster Volumes should only be allowed to provision PDs in zones where nodes exist`, + + // The following tests try to ssh directly to a node. 
None of our nodes have external IPs + `\[k8s.io\] \[sig-node\] crictl should be able to run crictl on the node`, + `\[sig-storage\] Flexvolumes should be mountable`, + `\[sig-storage\] Detaching volumes should not work when mount is in progress`, + + // We are using ovn-kubernetes to conceal metadata + `\[sig-auth\] Metadata Concealment should run a check-metadata-concealment job to completion`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1740959 + `\[sig-api-machinery\] AdmissionWebhook should be able to deny pod and configmap creation`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1745720 + `\[sig-storage\] CSI Volumes \[Driver: pd.csi.storage.gke.io\]`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1749882 + `\[sig-storage\] CSI Volumes CSI Topology test using GCE PD driver \[Serial\]`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1751367 + `gce-localssd-scsi-fs`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1750851 + // should be serial if/when it's re-enabled + `\[HPA\] Horizontal pod autoscaling \(scale resource: Custom Metrics from Stackdriver\)`, + `\[Feature:CustomMetricsAutoscaling\]`, + }, + "[Skipped:ibmcloud]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:kubevirt]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:nutanix]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:openstack]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:ovirt]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:vsphere]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + + "[sig-node]": { + `\[NodeConformance\]`, + `NodeLease`, + `lease API`, + `\[NodeFeature`, + `\[NodeAlphaFeature`, + `Probing container`, + `Security Context When creating a`, + `Downward API should create a pod that prints his name and namespace`, + `Liveness liveness pods should be automatically restarted`, + `Secret should create a pod that reads a secret`, + `Pods should delete a collection of pods`, + `Pods should run through the lifecycle of Pods and PodStatus`, + }, + "[sig-cluster-lifecycle]": { + `Feature:ClusterAutoscalerScalability`, + `recreate nodes and ensure they function`, + }, + "[sig-arch]": { + // not run, assigned to arch as catch-all + `\[Feature:GKELocalSSD\]`, + `\[Feature:GKENodePool\]`, + }, + + // These tests are skipped when openshift-tests needs to use a proxy to reach the + // cluster -- either because the test won't work while proxied, or because the test + // itself is testing functionality through its own proxy. + "[Skipped:Proxy]": { + // These tests set up their own proxy, which won't work when we need to access the + // cluster through a proxy.
+ `\[sig-cli\] Kubectl client Simple pod should support exec through an HTTP proxy`, + `\[sig-cli\] Kubectl client Simple pod should support exec through kubectl proxy`, + + // Kube currently uses the x/net/websockets pkg, which doesn't work with proxies. + // See: https://github.com/kubernetes/kubernetes/pull/103595 + `\[sig-node\] Pods should support retrieving logs from the container over websockets`, + `\[sig-cli\] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets`, + `\[sig-cli\] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets`, + `\[sig-node\] Pods should support remote command execution over websockets`, + + // These tests are flaky and require internet access + // See https://bugzilla.redhat.com/show_bug.cgi?id=2019375 + `\[sig-network\] DNS should resolve DNS of partial qualified names for services`, + `\[sig-network\] DNS should provide DNS for the cluster`, + // This test does not work when using an in-proxy cluster, see https://bugzilla.redhat.com/show_bug.cgi?id=2084560 + `\[sig-network\] Networking should provide Internet connection for containers`, + }, + + "[Skipped:SingleReplicaTopology]": { + `\[sig-apps\] Daemon set \[Serial\] should rollback without unnecessary restarts \[Conformance\]`, + `\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] doesn't evict pod with tolerations from tainted nodes`, + `\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] eventually evict pod with finite tolerations from tainted nodes`, + `\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] evicts pods from tainted nodes`, + `\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] removing taint cancels eviction \[Disruptive\] \[Conformance\]`, + `\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] pods evicted from tainted nodes have pod disruption condition`, + `\[sig-node\] NoExecuteTaintManager Multiple Pods \[Serial\] evicts pods with minTolerationSeconds \[Disruptive\] \[Conformance\]`, + `\[sig-node\] NoExecuteTaintManager Multiple Pods \[Serial\] only evicts pods without tolerations from tainted nodes`, + `\[sig-cli\] Kubectl client Kubectl taint \[Serial\] should remove all the taints with the same key off a node`, + `\[sig-network\] LoadBalancers should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on different nodes`, + `\[sig-network\] LoadBalancers should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on the same nodes`, + `\[sig-architecture\] Conformance Tests should have at least two untainted nodes`, + }, + + // Tests which can't be run/don't make sense to run against a cluster with all optional capabilities disabled + "[Skipped:NoOptionalCapabilities]": { + // Requires CSISnapshot capability + `\[Feature:VolumeSnapshotDataSource\]`, + // Requires Storage capability + `\[Driver: aws\]`, + `\[Feature:StorageProvider\]`, + }, + + // tests that don't pass under OVN Kubernetes + "[Skipped:Network/OVNKubernetes]": { + // ovn-kubernetes does not support named ports + `NetworkPolicy.*named port`, + }, + + "[Skipped:ibmroks]": { + // Calico is allowing the request to time out instead of returning 'REFUSED' + // https://bugzilla.redhat.com/show_bug.cgi?id=1825021 - ROKS: calico SDN results in a request timeout when accessing services with no endpoints + `\[sig-network\] Services should be rejected when no endpoints exist`, + + // Nodes in ROKS have access to secrets in the cluster to handle
encryption + // https://bugzilla.redhat.com/show_bug.cgi?id=1825013 - ROKS: worker nodes have access to secrets in the cluster + `\[sig-auth\] \[Feature:NodeAuthorizer\] Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error`, + `\[sig-auth\] \[Feature:NodeAuthorizer\] Getting a non-existent secret should exit with the Forbidden error, not a NotFound error`, + `\[sig-auth\] \[Feature:NodeAuthorizer\] Getting a secret for a workload the node has access to should succeed`, + `\[sig-auth\] \[Feature:NodeAuthorizer\] Getting an existing configmap should exit with the Forbidden error`, + `\[sig-auth\] \[Feature:NodeAuthorizer\] Getting an existing secret should exit with the Forbidden error`, + + // Access to node external address is blocked from pods within a ROKS cluster by Calico + // https://bugzilla.redhat.com/show_bug.cgi?id=1825016 - e2e: NodeAuthenticator tests use both external and internal addresses for node + `\[sig-auth\] \[Feature:NodeAuthenticator\] The kubelet's main port 10250 should reject requests with no credentials`, + `\[sig-auth\] \[Feature:NodeAuthenticator\] The kubelet can delegate ServiceAccount tokens to the API server`, + + // Mode returned by RHEL7 worker contains an extra character not expected by the test: dgtrwx vs dtrwx + // https://bugzilla.redhat.com/show_bug.cgi?id=1825024 - e2e: Failing test - HostPath should give a volume the correct mode + `\[sig-storage\] HostPath should give a volume the correct mode`, + }, + } + + ExcludedTests = []string{ + `\[Disabled:`, + `\[Disruptive\]`, + `\[Skipped\]`, + `\[Slow\]`, + `\[Flaky\]`, + `\[Local\]`, + } +) diff --git a/openshift-hack/e2e/annotate/rules_test.go b/openshift-hack/e2e/annotate/rules_test.go new file mode 100644 index 0000000000000..6a413a2ef92ac --- /dev/null +++ b/openshift-hack/e2e/annotate/rules_test.go @@ -0,0 +1,92 @@ +package annotate + +import ( + "testing" + + "github.com/onsi/ginkgo/v2/types" +) + +type testNode struct { + text string +} + +func (n *testNode) CodeLocations() []types.CodeLocation { + return []types.CodeLocation{{FileName: "k8s.io/kubernetes"}} +} + +func (n *testNode) Text() string { + return n.text +} + +func (n *testNode) AppendText(text string) { + n.text += text +} + +func (n *testNode) Labels() []string { + return nil +} + +func TestStockRules(t *testing.T) { + tests := []struct { + name string + + testName string + + expectedLabel string + expectedText string + }{ + { + name: "simple serial match", + testName: "[Serial] test", + expectedLabel: " [Suite:openshift/conformance/serial]", + expectedText: "[Serial] test [Suite:openshift/conformance/serial]", + }, + { + name: "don't tag skipped", + testName: `[Serial] example test [Skipped:gce]`, + expectedLabel: ` [Suite:openshift/conformance/serial]`, + expectedText: `[Serial] example test [Skipped:gce] [Suite:openshift/conformance/serial]`, // notice that this isn't categorized into any of our buckets + }, + { + name: "not skipped", + testName: `[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]`, + expectedLabel: ` [Suite:openshift/conformance/parallel/minimal]`, + expectedText: `[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal]`, + }, + { + name: "should skip localssd on gce", + testName: `[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted`, + expectedLabel: ` [Skipped:gce] [Suite:openshift/conformance/serial]`, + expectedText: `[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [Skipped:gce] [Suite:openshift/conformance/serial]`, // notice that this isn't categorized into any of our buckets + }, + { + name: "should skip NetworkPolicy tests on multitenant", + testName: `should do something with NetworkPolicy`, + expectedLabel: ` [Suite:openshift/conformance/parallel]`, + expectedText: `should do something with NetworkPolicy [Suite:openshift/conformance/parallel]`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testRenamer := newGenerator(TestMaps) + testNode := &testNode{ + text: test.testName, + } + + testRenamer.generateRename(test.testName, testNode) + changed := testRenamer.output[test.testName] + + if e, a := test.expectedLabel, changed; e != a { + t.Error(a) + } + testRenamer = newRenamerFromGenerated(map[string]string{test.testName: test.expectedLabel}) + testRenamer.updateNodeText(test.testName, testNode) + + if e, a := test.expectedText, testNode.Text(); e != a { + t.Logf(e) + t.Error(a) + } + }) + } +} diff --git a/openshift-hack/e2e/include.go b/openshift-hack/e2e/include.go new file mode 100644 index 0000000000000..48efbca4a3e38 --- /dev/null +++ b/openshift-hack/e2e/include.go @@ -0,0 +1,41 @@ +package e2e + +// This file should import all the packages defining k8s e2e tests that are +// relevant to openshift. It should match the imports from +// k8s.io/kubernetes/test/e2e/e2e_test.go. 
It is intended to affect: +// +// - what is included in the k8s-e2e.test binary built from this package +// - the annotations generated by the annotate package + +import ( + // define and freeze constants + _ "k8s.io/kubernetes/test/e2e/feature" + _ "k8s.io/kubernetes/test/e2e/nodefeature" + + // test sources + _ "k8s.io/kubernetes/test/e2e/apimachinery" + _ "k8s.io/kubernetes/test/e2e/apps" + _ "k8s.io/kubernetes/test/e2e/architecture" + _ "k8s.io/kubernetes/test/e2e/auth" + _ "k8s.io/kubernetes/test/e2e/autoscaling" + _ "k8s.io/kubernetes/test/e2e/cloud" + _ "k8s.io/kubernetes/test/e2e/common" + _ "k8s.io/kubernetes/test/e2e/dra" + _ "k8s.io/kubernetes/test/e2e/instrumentation" + _ "k8s.io/kubernetes/test/e2e/kubectl" + _ "k8s.io/kubernetes/test/e2e/lifecycle" + _ "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap" + _ "k8s.io/kubernetes/test/e2e/network" + _ "k8s.io/kubernetes/test/e2e/node" + _ "k8s.io/kubernetes/test/e2e/scheduling" + _ "k8s.io/kubernetes/test/e2e/storage" + _ "k8s.io/kubernetes/test/e2e/storage/csimock" + _ "k8s.io/kubernetes/test/e2e/storage/external" + _ "k8s.io/kubernetes/test/e2e/windows" + + // reconfigure framework + _ "k8s.io/kubernetes/test/e2e/framework/debug/init" + _ "k8s.io/kubernetes/test/e2e/framework/metrics/init" + _ "k8s.io/kubernetes/test/e2e/framework/node/init" + _ "k8s.io/kubernetes/test/utils/format" +) diff --git a/openshift-hack/e2e/kube_e2e_test.go b/openshift-hack/e2e/kube_e2e_test.go new file mode 100644 index 0000000000000..8356774e72637 --- /dev/null +++ b/openshift-hack/e2e/kube_e2e_test.go @@ -0,0 +1,126 @@ +package e2e + +//go:generate go run -mod vendor ./annotate/cmd -- ./annotate/generated/zz_generated.annotations.go + +// This file duplicates most of test/e2e/e2e_test.go but limits the included +// tests (via include.go) to tests that are relevant to openshift. + +import ( + "context" + "flag" + "fmt" + "math/rand" + "os" + "strings" + "testing" + "time" + + "gopkg.in/yaml.v2" + + // Never, ever remove the line with "/ginkgo". Without it, + // the ginkgo test runner will not detect that this + // directory contains a Ginkgo test suite. + // See https://github.com/kubernetes/kubernetes/issues/74827 + // "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" + + corev1 "k8s.io/api/core/v1" + kclientset "k8s.io/client-go/kubernetes" + "k8s.io/component-base/version" + conformancetestdata "k8s.io/kubernetes/test/conformance/testdata" + "k8s.io/kubernetes/test/e2e" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests" + testfixtures "k8s.io/kubernetes/test/fixtures" + "k8s.io/kubernetes/test/utils/image" + + // Ensure test annotation + "k8s.io/kubernetes/openshift-hack/e2e/annotate/generated" +) + +func TestMain(m *testing.M) { + var versionFlag bool + flag.CommandLine.BoolVar(&versionFlag, "version", false, "Displays version information.") + + // Register test flags, then parse flags. 
+ e2e.HandleFlags() + + if framework.TestContext.ListImages { + for _, v := range image.GetImageConfigs() { + fmt.Println(v.GetE2EImage()) + } + os.Exit(0) + } + if versionFlag { + fmt.Printf("%s\n", version.Get()) + os.Exit(0) + } + + // Enable embedded FS file lookup as fallback + testfiles.AddFileSource(e2etestingmanifests.GetE2ETestingManifestsFS()) + testfiles.AddFileSource(testfixtures.GetTestFixturesFS()) + testfiles.AddFileSource(conformancetestdata.GetConformanceTestdataFS()) + + if framework.TestContext.ListConformanceTests { + var tests []struct { + Testname string `yaml:"testname"` + Codename string `yaml:"codename"` + Description string `yaml:"description"` + Release string `yaml:"release"` + File string `yaml:"file"` + } + + data, err := testfiles.Read("test/conformance/testdata/conformance.yaml") + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := yaml.Unmarshal(data, &tests); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := yaml.NewEncoder(os.Stdout).Encode(tests); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + os.Exit(0) + } + + // Ensure the test namespaces have disabled SCCs and label syncer. + framework.TestContext.CreateTestingNS = func(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string) (*corev1.Namespace, error) { + return CreateTestingNS(ctx, baseName, c, labels, true) + } + + framework.AfterReadingAllFlags(&framework.TestContext) + + // TODO: Deprecating repo-root over time... instead just use gobindata_util.go , see #23987. + // Right now it is still needed, for example by + // test/e2e/framework/ingress/ingress_utils.go + // for providing the optional secret.yaml file and by + // test/e2e/framework/util.go for cluster/log-dump. 
+ if framework.TestContext.RepoRoot != "" { + testfiles.AddFileSource(testfiles.RootFileSource{Root: framework.TestContext.RepoRoot}) + } + + rand.Seed(time.Now().UnixNano()) + os.Exit(m.Run()) +} + +func TestE2E(t *testing.T) { + // TODO(soltysh): this is raw copy from end of openshift-hack/e2e/annotate/generated/zz_generated.annotations.go + // https://issues.redhat.com/browse/OCPBUGS-25641 + ginkgo.GetSuite().SetAnnotateFn(func(name string, node types.TestSpec) { + if newLabels, ok := generated.Annotations[name]; ok { + node.AppendText(newLabels) + } else { + panic(fmt.Sprintf("unable to find test %s", name)) + } + if strings.Contains(name, "Kubectl client Kubectl prune with applyset should apply and prune objects") { + fmt.Printf("Trying to annotate %q\n", name) + } + }) + + e2e.RunE2ETests(t) +} diff --git a/openshift-hack/e2e/namespace.go b/openshift-hack/e2e/namespace.go new file mode 100644 index 0000000000000..061e37270072e --- /dev/null +++ b/openshift-hack/e2e/namespace.go @@ -0,0 +1,132 @@ +package e2e + +import ( + "context" + "fmt" + "runtime/debug" + "strings" + + "github.com/onsi/ginkgo/v2" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + kclientset "k8s.io/client-go/kubernetes" + rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" + "k8s.io/client-go/util/retry" + "k8s.io/kubernetes/test/e2e/framework" + + projectv1 "github.com/openshift/api/project/v1" +) + +// CreateTestingNS ensures that kubernetes e2e tests have their service accounts in the privileged and anyuid SCCs +func CreateTestingNS(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string, isKubeNamespace bool) (*corev1.Namespace, error) { + if !strings.HasPrefix(baseName, "e2e-") { + baseName = "e2e-" + baseName + } + + if labels == nil { + labels = map[string]string{} + } + // turn off the OpenShift label syncer so that it does not attempt to sync + // the PodSecurity admission labels + labels["security.openshift.io/scc.podSecurityLabelSync"] = "false" + + if isKubeNamespace { + labels["security.openshift.io/disable-securitycontextconstraints"] = "true" + } + + ns, err := framework.CreateTestingNS(ctx, baseName, c, labels) + if err != nil { + return ns, err + } + + if !isKubeNamespace { + return ns, err + } + + // Add anyuid and privileged permissions for upstream tests + clientConfig, err := framework.LoadConfig() + if err != nil { + return ns, err + } + + rbacClient, err := rbacv1client.NewForConfig(clientConfig) + if err != nil { + return ns, err + } + framework.Logf("About to run a Kube e2e test, ensuring namespace/%s is privileged", ns.Name) + // add the "privileged" scc to ensure pods that explicitly + // request extra capabilities are not rejected + addRoleToE2EServiceAccounts(ctx, rbacClient, []corev1.Namespace{*ns}, "system:openshift:scc:privileged") + // add the "anyuid" scc to ensure pods that don't specify a + // uid don't get forced into a range (mimics upstream + // behavior) + addRoleToE2EServiceAccounts(ctx, rbacClient, []corev1.Namespace{*ns}, "system:openshift:scc:anyuid") + // add the "hostmount-anyuid" scc to ensure pods using hostPath + // can execute tests + addRoleToE2EServiceAccounts(ctx, rbacClient, []corev1.Namespace{*ns}, "system:openshift:scc:hostmount-anyuid") + + // The intra-pod test requires that the service account have + // permission to retrieve service endpoints. 
+ addRoleToE2EServiceAccounts(ctx, rbacClient, []corev1.Namespace{*ns}, "view") + + // in practice too many kube tests ignore scheduling constraints + allowAllNodeScheduling(ctx, c, ns.Name) + + return ns, err +} + +var longRetry = wait.Backoff{Steps: 100} + +func fatalErr(msg interface{}) { + // the path that leads to this being called isn't always clear... + fmt.Fprintln(ginkgo.GinkgoWriter, string(debug.Stack())) + framework.Failf("%v", msg) +} + +func addRoleToE2EServiceAccounts(ctx context.Context, rbacClient rbacv1client.RbacV1Interface, namespaces []corev1.Namespace, roleName string) { + err := retry.RetryOnConflict(longRetry, func() error { + for _, ns := range namespaces { + if ns.Status.Phase != corev1.NamespaceTerminating { + _, err := rbacClient.RoleBindings(ns.Name).Create(ctx, &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{GenerateName: "default-" + roleName, Namespace: ns.Name}, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: roleName, + }, + Subjects: []rbacv1.Subject{ + {Name: "default", Namespace: ns.Name, Kind: rbacv1.ServiceAccountKind}, + }, + }, metav1.CreateOptions{}) + if err != nil { + framework.Logf("Warning: Failed to add role to e2e service account: %v", err) + } + } + } + return nil + }) + if err != nil { + fatalErr(err) + } +} + +// allowAllNodeScheduling sets the annotation on namespace that allows all nodes to be scheduled onto. +func allowAllNodeScheduling(ctx context.Context, c kclientset.Interface, namespace string) { + err := retry.RetryOnConflict(longRetry, func() error { + ns, err := c.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) + if err != nil { + return err + } + if ns.Annotations == nil { + ns.Annotations = make(map[string]string) + } + ns.Annotations[projectv1.ProjectNodeSelector] = "" + _, err = c.CoreV1().Namespaces().Update(ctx, ns, metav1.UpdateOptions{}) + return err + }) + if err != nil { + fatalErr(err) + } +} diff --git a/openshift-hack/images/OWNERS b/openshift-hack/images/OWNERS new file mode 100644 index 0000000000000..7b196b0fb7003 --- /dev/null +++ b/openshift-hack/images/OWNERS @@ -0,0 +1,11 @@ +reviewers: + - smarterclayton + - giuseppe + - JacobTanenbaum + - pweil- + - pecameron + - sdodson +approvers: + - smarterclayton + - pweil- + - sdodson diff --git a/openshift-hack/images/hyperkube/Dockerfile.rhel b/openshift-hack/images/hyperkube/Dockerfile.rhel new file mode 100644 index 0000000000000..3e9b98aac54cd --- /dev/null +++ b/openshift-hack/images/hyperkube/Dockerfile.rhel @@ -0,0 +1,17 @@ +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder +WORKDIR /go/src/k8s.io/kubernetes +COPY . . 
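+# The cp below resolves the per-arch output directory via $(go env GOARCH); +# on an x86-64 builder, for example, that is _output/local/bin/linux/amd64.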
+RUN make WHAT='cmd/kube-apiserver cmd/kube-controller-manager cmd/kube-scheduler cmd/kubelet cmd/watch-termination openshift-hack/cmd/k8s-tests openshift-hack/cmd/k8s-tests-ext' && \ + mkdir -p /tmp/build && \ + cp openshift-hack/images/hyperkube/hyperkube openshift-hack/images/hyperkube/kubensenter /tmp/build && \ + cp /go/src/k8s.io/kubernetes/_output/local/bin/linux/$(go env GOARCH)/{kube-apiserver,kube-controller-manager,kube-scheduler,kubelet,watch-termination,k8s-tests,k8s-tests-ext} \ + /tmp/build && \ + gzip /tmp/build/k8s-tests-ext + +FROM registry.ci.openshift.org/ocp/4.19:base-rhel9 +RUN yum install -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False iproute && yum clean all +COPY --from=builder /tmp/build/* /usr/bin/ +LABEL io.k8s.display-name="OpenShift Kubernetes Server Commands" \ + io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." \ + io.openshift.tags="openshift,hyperkube" \ + io.openshift.build.versions="kubernetes=1.32.0" \ No newline at end of file diff --git a/openshift-hack/images/hyperkube/OWNERS b/openshift-hack/images/hyperkube/OWNERS new file mode 100644 index 0000000000000..e814678493032 --- /dev/null +++ b/openshift-hack/images/hyperkube/OWNERS @@ -0,0 +1,5 @@ +reviewers: + - smarterclayton + - sdodson +approvers: + - smarterclayton diff --git a/openshift-hack/images/hyperkube/hyperkube b/openshift-hack/images/hyperkube/hyperkube new file mode 100755 index 0000000000000..cfed9cd737c02 --- /dev/null +++ b/openshift-hack/images/hyperkube/hyperkube @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +BINS=( + kube-apiserver + kube-controller-manager + kube-scheduler + kubelet +) + +function array_contains() { + local search="$1" + local element + shift + for element; do + if [[ "${element}" == "${search}" ]]; then + return 0 + fi + done + return 1 +} + +function print_usage() { + cat <<EOF +Usage: hyperkube COMMAND [ARGS...] + +COMMAND is one of: ${BINS[*]} +EOF +} + +function main() { + if [[ $# -eq 0 ]]; then + print_usage + exit 1 + fi + + local command="$1" + shift + + if ! array_contains "${command}" "${BINS[@]}"; then + print_usage + exit 1 + fi + + if ! which "${command}" >/dev/null; then + echo "${command}: command not found" + exit 1 + fi + exec "${command}" "${@}" +} + +main "${@}" \ No newline at end of file diff --git a/openshift-hack/images/hyperkube/kubensenter b/openshift-hack/images/hyperkube/kubensenter new file mode 100644 index 0000000000000..56ab26ee9e952 --- /dev/null +++ b/openshift-hack/images/hyperkube/kubensenter @@ -0,0 +1,117 @@ +#!/bin/bash + +# shellcheck disable=SC2016 +usage() { + echo "A command line wrapper to run commands or shells inside the" + echo "kubens.service mount namespace." + echo + echo "Usage:" + echo " $(basename "$0") [--verbose|--quiet] [command ...]" + echo + echo 'Autodetect whether the `kubens.service` has pinned a mount namespace in a' + echo 'well-known location, and if so, join it by passing it and the user-specified' + echo 'command to nsenter(1). If `kubens.service` has not set up the mount namespace,' + echo 'the user-specified command is still executed by nsenter(1) but no namespace is' + echo 'entered.' + echo + echo 'If $KUBENSMNT is set in the environment, skip autodetection and attempt to join' + echo 'that mount namespace by passing it and the user-specified command to' + echo 'nsenter(1). If the mount namespace is missing or invalid, the command will' + echo 'fail.' + echo + echo 'In either case, if no command is given on the command line, nsenter(1) will' + echo 'spawn a new interactive shell which will be inside the mount namespace if' + echo 'detected.'
+ exit 1 +} + +LOGLEVEL=${KUBENSENTER_LOG:-1} +_log() { + local level=$1; shift + if [[ $level -le $LOGLEVEL ]]; then + echo "kubensenter: $*" >&2 + fi +} + +info() { + _log 1 "$*" +} + +debug() { + _log 2 "$*" +} + +# Returns 0 if the argument given is a mount namespace +ismnt() { + local nsfs + nsfs=$(findmnt -o SOURCE -n -t nsfs "$1") + [[ $nsfs =~ ^nsfs\[mnt:\[ ]] +} + +# Set KUBENSMNT to the default location that kubens.service uses if KUBENSMNT isn't already set. +DEFAULT_KUBENSMNT=${DEFAULT_KUBENSMNT:-"/run/kubens/mnt"} +autodetect() { + local default=$DEFAULT_KUBENSMNT + if [[ -n $KUBENSMNT ]]; then + debug "Autodetect: \$KUBENSMNT already set" + return 0 + fi + if [[ ! -e $default ]]; then + debug "Autodetect: No mount namespace found at $default" + return 1 + fi + if ! ismnt "$default"; then + info "Autodetect: Stale or mismatched namespace at $default" + return 1 + fi + KUBENSMNT=$default + info "Autodetect: kubens.service namespace found at $KUBENSMNT" + return 0 +} + +# Wrap the user-given command in nsenter, joining the mount namespace set in $KUBENSMNT if set +kubensenter() { + local nsarg + if [[ -n $KUBENSMNT ]]; then + debug "Joining mount namespace in $KUBENSMNT" + nsarg=$(printf -- "--mount=%q" "$KUBENSMNT") + else + debug "KUBENSMNT not set; running normally" + # Intentional fallthrough to run nsenter anyway: + # - If $@ is non-empty, nsenter effectively runs `exec "$@"` + # - If $@ is empty, nsenter spawns a new shell + fi + # Using 'exec' is important here; Without it, systemd may have trouble + # seeing the underlying process especially if it's using 'Type=notify' + # semantics. + # shellcheck disable=SC2086 + # ^- Intentionally collapse $nsarg if not set (and we've already shell-quoted it above if we did set it) + exec nsenter $nsarg "$@" +} + +main() { + while [[ -n $1 ]]; do + case "$1" in + -h | --help) + usage + ;; + -v | --verbose) + shift + ((LOGLEVEL++)) + ;; + -q | --quiet) + shift + ((LOGLEVEL--)) + ;; + *) + break + ;; + esac + done + + autodetect + kubensenter "$@" +} + +# bash modulino +[[ "${BASH_SOURCE[0]}" == "$0" ]] && main "$@" diff --git a/openshift-hack/images/installer-kube-apiserver-artifacts/Dockerfile.rhel b/openshift-hack/images/installer-kube-apiserver-artifacts/Dockerfile.rhel new file mode 100644 index 0000000000000..fb57a6042fe64 --- /dev/null +++ b/openshift-hack/images/installer-kube-apiserver-artifacts/Dockerfile.rhel @@ -0,0 +1,55 @@ +# This Dockerfile builds an image containing Mac and Linux/AMD64 versions of +# the kube-apiserver layered on top of the cluster-native Linux installer image. +# The resulting image is used to build the openshift-install binary. + +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS macbuilder +ARG TAGS="" +WORKDIR /go/src/k8s.io/kubernetes +COPY . . +ENV KUBE_BUILD_PLATFORMS=darwin/amd64 +ENV KUBE_STATIC_OVERRIDES=kube-apiserver +RUN make WHAT='cmd/kube-apiserver' + +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS macarmbuilder +ARG TAGS="" +WORKDIR /go/src/k8s.io/kubernetes +COPY . . +ENV KUBE_BUILD_PLATFORMS=darwin/arm64 +ENV KUBE_STATIC_OVERRIDES=kube-apiserver +RUN make WHAT='cmd/kube-apiserver' + +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS linuxbuilder +ARG TAGS="" +WORKDIR /go/src/k8s.io/kubernetes +COPY . . 
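+# (Assumption, not documented in this patch: GO_COMPLIANCE_EXCLUDE below is honored by the Go wrapper in the OpenShift CI builder images, and ".*" opts every package in this stage out of the compliance instrumentation so the static cross-builds succeed.)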
+ENV GO_COMPLIANCE_EXCLUDE=".*" +ENV KUBE_BUILD_PLATFORMS=linux/amd64 +ENV KUBE_STATIC_OVERRIDES=kube-apiserver +RUN make WHAT='cmd/kube-apiserver' + +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS linuxarmbuilder +ARG TAGS="" +WORKDIR /go/src/k8s.io/kubernetes +COPY . . +ENV GO_COMPLIANCE_EXCLUDE=".*" +ENV KUBE_BUILD_PLATFORMS=linux/arm64 +ENV KUBE_STATIC_OVERRIDES=kube-apiserver +RUN make WHAT='cmd/kube-apiserver' + +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder +ARG TAGS="" +WORKDIR /go/src/k8s.io/kubernetes +COPY . . +ENV GO_COMPLIANCE_EXCLUDE=".*" +ENV KUBE_STATIC_OVERRIDES=kube-apiserver +RUN make WHAT='cmd/kube-apiserver' + +FROM registry.ci.openshift.org/ocp/4.19:base-rhel9 +COPY --from=macbuilder /go/src/k8s.io/kubernetes/_output/local/bin/darwin/amd64/kube-apiserver /usr/share/openshift/darwin/amd64/kube-apiserver +COPY --from=macarmbuilder /go/src/k8s.io/kubernetes/_output/local/bin/darwin/arm64/kube-apiserver /usr/share/openshift/darwin/arm64/kube-apiserver +COPY --from=linuxbuilder /go/src/k8s.io/kubernetes/_output/local/bin/linux/amd64/kube-apiserver /usr/share/openshift/linux/amd64/kube-apiserver +COPY --from=linuxarmbuilder /go/src/k8s.io/kubernetes/_output/local/bin/linux/arm64/kube-apiserver /usr/share/openshift/linux/arm64/kube-apiserver +COPY --from=builder /go/src/k8s.io/kubernetes/_output/local/bin/ /usr/share/openshift/ + +# This image is not an operator, it is only used as part of the build pipeline. +LABEL io.openshift.release.operator=false diff --git a/openshift-hack/images/installer-kube-apiserver-artifacts/OWNERS b/openshift-hack/images/installer-kube-apiserver-artifacts/OWNERS new file mode 100644 index 0000000000000..f382794577f99 --- /dev/null +++ b/openshift-hack/images/installer-kube-apiserver-artifacts/OWNERS @@ -0,0 +1,10 @@ +reviewers: + - JoelSpeed + - vincepri + - patrickdillon + - r4f4 +approvers: + - JoelSpeed + - vincepri + - patrickdillon + - r4f4 diff --git a/openshift-hack/images/kube-proxy/Dockerfile.rhel b/openshift-hack/images/kube-proxy/Dockerfile.rhel new file mode 100644 index 0000000000000..619ce5942b8d9 --- /dev/null +++ b/openshift-hack/images/kube-proxy/Dockerfile.rhel @@ -0,0 +1,15 @@ +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder +WORKDIR /go/src/k8s.io/kubernetes +COPY . . 
+RUN make WHAT='cmd/kube-proxy' && \ + mkdir -p /tmp/build && \ + cp /go/src/k8s.io/kubernetes/_output/local/bin/linux/$(go env GOARCH)/kube-proxy /tmp/build + +FROM registry.ci.openshift.org/ocp/4.19:base-rhel9 +RUN INSTALL_PKGS="conntrack-tools iptables nftables" && \ + yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ + yum clean all && rm -rf /var/cache/* +COPY --from=builder /tmp/build/* /usr/bin/ +LABEL io.k8s.display-name="Kubernetes kube-proxy" \ + io.k8s.description="Provides kube-proxy for external CNI plugins" \ + io.openshift.tags="openshift,kube-proxy" diff --git a/openshift-hack/images/kube-proxy/OWNERS b/openshift-hack/images/kube-proxy/OWNERS new file mode 100644 index 0000000000000..df1fc6730a20b --- /dev/null +++ b/openshift-hack/images/kube-proxy/OWNERS @@ -0,0 +1,19 @@ +reviewers: + - abhat + - danwinship + - dougbtv + - JacobTanenbaum + - jcaamano + - kyrtapz + - trozet + - tssurya +approvers: + - abhat + - danwinship + - dougbtv + - fepan + - JacobTanenbaum + - jcaamano + - knobunc + - kyrtapz + - trozet diff --git a/openshift-hack/images/kube-proxy/test-kube-proxy.sh b/openshift-hack/images/kube-proxy/test-kube-proxy.sh new file mode 100755 index 0000000000000..514b52eff28d9 --- /dev/null +++ b/openshift-hack/images/kube-proxy/test-kube-proxy.sh @@ -0,0 +1,244 @@ +#!/bin/bash + +set -o nounset +set -o errexit +set -o pipefail + +# This script tests the kube-proxy image without actually using it as +# part of the infrastructure of a cluster. It is intended to be copied +# to the kubernetes-tests image for use in CI and should have no +# dependencies beyond oc and basic shell stuff. + +# There is no good way to "properly" test the kube-proxy image in +# OpenShift CI, because it is only used as a dependency of third-party +# software (e.g. Calico); no fully-RH-supported configuration uses it. +# +# However, since we don't apply any kube-proxy-specific patches to our +# tree, we can assume that it *mostly* works, since we are building +# from sources that passed upstream testing. This script is just to +# confirm that our build is not somehow completely broken (e.g. +# immediate segfault due to a bad build environment). + +if [[ -z "${KUBE_PROXY_IMAGE}" ]]; then + echo "KUBE_PROXY_IMAGE not set" 1>&2 + exit 1 +fi + +TMPDIR=$(mktemp --tmpdir -d kube-proxy.XXXXXX) +function cleanup() { + oc delete namespace kube-proxy-test || true + oc delete clusterrole kube-proxy-test || true + oc delete clusterrolebinding kube-proxy-test || true + rm -rf "${TMPDIR}" +} +trap "cleanup" EXIT + +function indent() { + sed -e 's/^/ /' "$@" + echo "" +} + +# Decide what kube-proxy mode to use.
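+# For example, an OCP_VERSION of "4.18.9" matches the first pattern in the case +# statement below and selects iptables mode, while "4.19.0" falls through to the +# default arm and selects nftables (the version values here are illustrative).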
+ # (jsonpath expression copied from types_cluster_version.go) +OCP_VERSION=$(oc get clusterversion version -o jsonpath='{.status.history[?(@.state=="Completed")].version}') +case "${OCP_VERSION}" in + 4.17.*|4.18.*) + # 4.17 and 4.18 always use RHEL 9 (and nftables mode was still alpha in 4.17), so + # use iptables mode + PROXY_MODE="iptables" + ;; + *) + # 4.19 and later may use RHEL 10, so use nftables mode + PROXY_MODE="nftables" + ;; +esac + +echo "Setting up Namespace and RBAC" +oc create -f - <<EOF +apiVersion: v1 +kind: Namespace +metadata: + name: kube-proxy-test + labels: + pod-security.kubernetes.io/enforce: privileged +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-proxy-test +rules: + - apiGroups: [""] + resources: ["nodes", "services", "endpoints"] + verbs: ["get", "list", "watch"] + - apiGroups: ["discovery.k8s.io"] + resources: ["endpointslices"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-proxy-test +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-proxy-test +subjects: + - kind: ServiceAccount + name: default + namespace: kube-proxy-test +EOF + +echo "Starting kube-proxy" +oc create -f - <<EOF +apiVersion: v1 +kind: Pod +metadata: + name: kube-proxy + namespace: kube-proxy-test +spec: + hostNetwork: true + containers: + - name: kube-proxy + image: ${KUBE_PROXY_IMAGE} + command: + - /usr/bin/kube-proxy + - --proxy-mode=${PROXY_MODE} + securityContext: + privileged: true +EOF + +echo "Waiting for kube-proxy pod to become ready" +oc wait --for=condition=Ready -n kube-proxy-test pod/kube-proxy --timeout=120s + +# Wait for kube-proxy to report that it has programmed its initial rule set, +# which it exposes via its sync_proxy_rules metrics. +function kube_proxy_synced() { + oc exec -n kube-proxy-test kube-proxy -- curl -s http://localhost:10249/metrics > "${TMPDIR}/metrics.txt" + grep -q '^kubeproxy_sync_proxy_rules_duration_seconds_count [^0]' "${TMPDIR}/metrics.txt" +} +synced=false +for count in $(seq 1 10); do + date + if kube_proxy_synced; then + synced=true + break + fi + sleep 5 +done +date +if [[ "${synced}" != true ]]; then + echo "kube-proxy failed to sync to ${PROXY_MODE}:" + oc logs -n kube-proxy-test kube-proxy |& indent + + echo "last-seen metrics:" + indent "${TMPDIR}/metrics.txt" + + exit 1 +fi + +# Dump the ruleset; since RHEL9 uses iptables-nft, kube-proxy's rules +# will show up in the nft ruleset regardless of whether kube-proxy is +# using iptables or nftables. +echo "Dumping rules" +oc exec -n kube-proxy-test kube-proxy -- nft list ruleset >& "${TMPDIR}/nft.out" + +# We don't want to hardcode any assumptions about what kube-proxy's +# rules look like, but it necessarily must be the case that every +# clusterIP appears somewhere in the output. (We could look for +# endpoint IPs too, but that's more racy if there's any chance the +# cluster could be changing.) +exitcode=0 +for service in kubernetes.default dns-default.openshift-dns router-default.openshift-ingress; do + name="${service%.*}" + namespace="${service#*.}" + clusterIP="$(oc get service -n ${namespace} ${name} -o jsonpath='{.spec.clusterIP}')" + echo "Looking for ${service} cluster IP (${clusterIP}) in ruleset" + for ip in ${clusterIP}; do + if ! grep --quiet --fixed-strings " ${ip} " "${TMPDIR}/nft.out"; then + echo "Did not find IP ${ip} (from service ${name} in namespace ${namespace}) in ruleset" 1>&2 + exitcode=1 + fi + done +done +echo "" + +if [[ "${exitcode}" == 1 ]]; then + echo "Ruleset was:" + indent "${TMPDIR}/nft.out" + + echo "kube-proxy logs:" + oc logs -n kube-proxy-test kube-proxy |& indent +fi + +exit "${exitcode}" diff --git a/openshift-hack/images/tests/Dockerfile.rhel b/openshift-hack/images/tests/Dockerfile.rhel new file mode 100644 index 0000000000000..ff0b2fa6e1dba --- /dev/null +++ b/openshift-hack/images/tests/Dockerfile.rhel @@ -0,0 +1,22 @@ +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder +WORKDIR /go/src/k8s.io/kubernetes +COPY . .
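+# k8s-e2e.test below is the ginkgo-based test binary whose test set is governed +# by the imports in openshift-hack/e2e/include.go (see above).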
+RUN make WHAT=openshift-hack/e2e/k8s-e2e.test; \ + make WHAT=vendor/github.com/onsi/ginkgo/v2/ginkgo; \ + mkdir -p /tmp/build; \ + cp /go/src/k8s.io/kubernetes/_output/local/bin/linux/$(go env GOARCH)/k8s-e2e.test /tmp/build/; \ + cp /go/src/k8s.io/kubernetes/_output/local/bin/linux/$(go env GOARCH)/ginkgo /tmp/build/; \ + cp /go/src/k8s.io/kubernetes/openshift-hack/test-kubernetes-e2e.sh /tmp/build/; \ + cp /go/src/k8s.io/kubernetes/openshift-hack/images/kube-proxy/test-kube-proxy.sh /tmp/build/ + +FROM registry.ci.openshift.org/ocp/4.19:tools +COPY --from=builder /tmp/build/k8s-e2e.test /usr/bin/ +COPY --from=builder /tmp/build/ginkgo /usr/bin/ +COPY --from=builder /tmp/build/test-kubernetes-e2e.sh /usr/bin/ +COPY --from=builder /tmp/build/test-kube-proxy.sh /usr/bin/ +RUN yum install --setopt=tsflags=nodocs -y git gzip util-linux && yum clean all && rm -rf /var/cache/yum/* && \ + git config --system user.name test && \ + git config --system user.email test@test.com && \ + chmod g+w /etc/passwd +LABEL io.k8s.display-name="Kubernetes End-to-End Tests" \ + io.openshift.tags="k8s,tests,e2e" diff --git a/openshift-hack/images/tests/OWNERS b/openshift-hack/images/tests/OWNERS new file mode 100644 index 0000000000000..e814678493032 --- /dev/null +++ b/openshift-hack/images/tests/OWNERS @@ -0,0 +1,5 @@ +reviewers: + - smarterclayton + - sdodson +approvers: + - smarterclayton diff --git a/openshift-hack/kubensenter.env b/openshift-hack/kubensenter.env new file mode 100644 index 0000000000000..c37c5bbab2868 --- /dev/null +++ b/openshift-hack/kubensenter.env @@ -0,0 +1,16 @@ +# Configure which version of kubensenter we need to synchronize + +# Define the github repo where we should fetch the kubensenter script +REPO="github.com/containers/kubensmnt" + +# The specific commit or tag of the kubensenter script +# Note: Should be an explicit tag or commit SHA - Setting to a branch name will cause unexpected verification failures in the future. +COMMIT=v1.2.0 # (36e5652992df9a3d4abc3d8f02a33c2e364efda9) + +# The branch name or tag glob to resolve when 'update-kubensenter.sh --to-latest' is run: + # - If this resolves to a branch, COMMIT will be set to the latest commit hash on that branch. + # - If this resolves to a tag name, COMMIT will be set to that tag. + # - May contain a glob expression such as "v1.1.*" that would match any of the following: + # v1.1.0 v1.1.3 v1.1.22-rc1 +#TARGET="main" +TARGET="v1.2.*" diff --git a/openshift-hack/lib/build/binaries.sh b/openshift-hack/lib/build/binaries.sh new file mode 100644 index 0000000000000..e3c71254f37c1 --- /dev/null +++ b/openshift-hack/lib/build/binaries.sh @@ -0,0 +1,457 @@ +#!/usr/bin/env bash + +# This library holds utility functions for building +# and placing Golang binaries for multiple arches. + +# os::build::binaries_from_targets takes a list of build targets and returns the +# full go package to be built +function os::build::binaries_from_targets() { + local target + for target; do + if [[ -z "${target}" ]]; then + continue + fi + echo "${OS_GO_PACKAGE}/${target}" + done +} +readonly -f os::build::binaries_from_targets + +# Asks golang what it thinks the host platform is. The Go toolchain does some +# slightly different things when the target platform matches the host platform.
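+# For example, on a 64-bit Intel Linux build host this prints "linux/amd64".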
+function os::build::host_platform() { + echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" +} +readonly -f os::build::host_platform + +# Create a user-friendly version of host_platform for end users +function os::build::host_platform_friendly() { + local platform=${1:-} + if [[ -z "${platform}" ]]; then + platform=$(os::build::host_platform) + fi + if [[ $platform == "windows/amd64" ]]; then + echo "windows" + elif [[ $platform == "darwin/amd64" ]]; then + echo "mac" + elif [[ $platform == "linux/386" ]]; then + echo "linux-32bit" + elif [[ $platform == "linux/amd64" ]]; then + echo "linux-64bit" + elif [[ $platform == "linux/ppc64le" ]]; then + echo "linux-powerpc64" + elif [[ $platform == "linux/arm64" ]]; then + echo "linux-arm64" + elif [[ $platform == "linux/s390x" ]]; then + echo "linux-s390" + else + echo "$(go env GOHOSTOS)-$(go env GOHOSTARCH)" + fi +} +readonly -f os::build::host_platform_friendly + +# This converts from platform/arch to PLATFORM_ARCH; the host platform is +# used if no parameter is passed +function os::build::platform_arch() { + local platform=${1:-} + if [[ -z "${platform}" ]]; then + platform=$(os::build::host_platform) + fi + + echo "${platform}" | tr '[:lower:]/' '[:upper:]_' +} +readonly -f os::build::platform_arch + +# os::build::setup_env will check that the `go` command is available in +# ${PATH}. If not running on Travis, it will also check that the Go version is +# good enough for the Kubernetes build. +# +# Output Vars: +# export GOPATH - A modified GOPATH to our created tree along with extra +# stuff. +# export GOBIN - This is actively unset if already set as we want binaries +# placed in a predictable place. +function os::build::setup_env() { + os::util::ensure::system_binary_exists 'go' + + if [[ -z "$(which sha256sum)" ]]; then + sha256sum() { + return 0 + } + fi + + # Travis continuous build uses a head go release that doesn't report + # a version number, so we skip this check on Travis. It's unnecessary + # there anyway. + if [[ "${TRAVIS:-}" != "true" ]]; then + os::golang::verify_go_version + fi + # For any tools that expect this to be set (it is default in golang 1.6), + # force vendor experiment. + export GO15VENDOREXPERIMENT=1 + + unset GOBIN + + # create a local GOPATH in _output + GOPATH="${OS_OUTPUT}/go" + OS_TARGET_BIN="${OS_OUTPUT}/go/bin" + local go_pkg_dir="${GOPATH}/src/${OS_GO_PACKAGE}" + local go_pkg_basedir + go_pkg_basedir="$(dirname "${go_pkg_dir}")" + + mkdir -p "${go_pkg_basedir}" + rm -f "${go_pkg_dir}" + + # TODO: This symlink should be relative. + ln -s "${OS_ROOT}" "${go_pkg_dir}" + + # lots of tools "just don't work" unless we're in the GOPATH + cd "${go_pkg_dir}" || exit 1 + + # Append OS_EXTRA_GOPATH to the GOPATH if it is defined. + if [[ -n ${OS_EXTRA_GOPATH:-} ]]; then + GOPATH="${GOPATH}:${OS_EXTRA_GOPATH}" + fi + + export GOPATH + export OS_TARGET_BIN +} +readonly -f os::build::setup_env + +# Build static binary targets. +# +# Input: +# $@ - targets and go flags. If no targets are set then all binary targets +# are built. +# OS_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset +# then just the host architecture is built. +function os::build::build_static_binaries() { + CGO_ENABLED=0 os::build::build_binaries -installsuffix=cgo "$@" +} +readonly -f os::build::build_static_binaries + +# Build binary targets specified +# +# Input: +# $@ - targets and go flags. If no targets are set then all binary targets +# are built. +# OS_BUILD_PLATFORMS - Incoming variable of targets to build for.
If unset +# then just the host architecture is built. +function os::build::build_binaries() { + if [[ $# -eq 0 ]]; then + return + fi + local -a binaries=( "$@" ) + # Create a sub-shell so that we don't pollute the outer environment + ( os::build::internal::build_binaries "${binaries[@]+"${binaries[@]}"}" ) +} + +# Build binary targets specified. Should always be run in a sub-shell so we don't leak GOBIN +# +# Input: +# $@ - targets and go flags. If no targets are set then all binaries targets +# are built. +# OS_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset +# then just the host architecture is built. +os::build::internal::build_binaries() { + # Check for `go` binary and set ${GOPATH}. + os::build::setup_env + + # Fetch the version. + local version_ldflags + version_ldflags=$(os::build::ldflags) + + local goflags + # Use eval to preserve embedded quoted strings. + eval "goflags=(${OS_GOFLAGS:-})" + gogcflags="${GOGCFLAGS:-}" + + local arg + for arg; do + if [[ "${arg}" == -* ]]; then + # Assume arguments starting with a dash are flags to pass to go. + goflags+=("${arg}") + fi + done + + os::build::export_targets "$@" + + if [[ ! "${targets[*]:+${targets[*]}}" || ! "${binaries[*]:+${binaries[*]}}" ]]; then + return 0 + fi + + local -a nonstatics=() + local -a tests=() + for binary in "${binaries[@]-}"; do + if [[ "${binary}" =~ ".test"$ ]]; then + tests+=("$binary") + else + nonstatics+=("$binary") + fi + done + + local pkgdir="${OS_OUTPUT_PKGDIR}" + if [[ "${CGO_ENABLED-}" == "0" ]]; then + pkgdir+="/static" + fi + + local host_platform + host_platform=$(os::build::host_platform) + local platform + for platform in "${platforms[@]+"${platforms[@]}"}"; do + echo "++ Building go targets for ${platform}:" "${targets[@]}" + mkdir -p "${OS_OUTPUT_BINPATH}/${platform}" + + # output directly to the desired location + if [[ "$platform" == "$host_platform" ]]; then + export GOBIN="${OS_OUTPUT_BINPATH}/${platform}" + else + unset GOBIN + fi + + local platform_gotags_envvar + platform_gotags_envvar=OS_GOFLAGS_TAGS_$(os::build::platform_arch "${platform}") + local platform_gotags_test_envvar + platform_gotags_test_envvar=OS_GOFLAGS_TAGS_TEST_$(os::build::platform_arch "${platform}") + + # work around https://github.com/golang/go/issues/11887 + local local_ldflags="${version_ldflags}" + if [[ "${platform}" == "darwin/amd64" ]]; then + local_ldflags+=" -s" + fi + + #Add Windows File Properties/Version Info and Icon Resource for oc.exe + if [[ "$platform" == "windows/amd64" ]]; then + os::build::generate_windows_versioninfo + fi + + if [[ ${#nonstatics[@]} -gt 0 ]]; then + GOOS=${platform%/*} GOARCH=${platform##*/} go install \ + -tags "${OS_GOFLAGS_TAGS-} ${!platform_gotags_envvar:-}" \ + -ldflags="${local_ldflags}" \ + "${goflags[@]:+${goflags[@]}}" \ + -gcflags "${gogcflags}" \ + "${nonstatics[@]}" + + # GOBIN is not supported on cross-compile in Go 1.5+ - move to the correct target + if [[ "$platform" != "$host_platform" ]]; then + local platform_src="/${platform//\//_}" + mv "${OS_TARGET_BIN}/${platform_src}/"* "${OS_OUTPUT_BINPATH}/${platform}/" + fi + fi + + if [[ "$platform" == "windows/amd64" ]]; then + os::build::clean_windows_versioninfo + fi + + for test in "${tests[@]:+${tests[@]}}"; do + local outfile + outfile="${OS_OUTPUT_BINPATH}/${platform}/$(basename "${test}")" + # disabling cgo allows use of delve + CGO_ENABLED="${OS_TEST_CGO_ENABLED:-}" GOOS=${platform%/*} GOARCH=${platform##*/} go test \ + -tags "${OS_GOFLAGS_TAGS-} ${!platform_gotags_test_envvar:-}" \ + 
-ldflags "${local_ldflags}" \ + -i -c -o "${outfile}" \ + "${goflags[@]:+${goflags[@]}}" \ + "$(dirname "${test}")" + done + done + + os::build::check_binaries +} +readonly -f os::build::build_binaries + + # Generates the set of target packages, binaries, and platforms to build for. +# Accepts binaries via $@, and platforms via OS_BUILD_PLATFORMS, or defaults to +# the current platform. +function os::build::export_targets() { + platforms=("${OS_BUILD_PLATFORMS[@]:+${OS_BUILD_PLATFORMS[@]}}") + + targets=() + local arg + for arg; do + if [[ "${arg}" != -* ]]; then + targets+=("${arg}") + fi + done + + binaries=($(os::build::binaries_from_targets "${targets[@]-}")) +} +readonly -f os::build::export_targets + +# This will take $@ from $GOPATH/bin and copy them to the appropriate +# place in ${OS_OUTPUT_BINDIR} +# +# If OS_RELEASE_ARCHIVE is set, tar archives prefixed with OS_RELEASE_ARCHIVE for +# each of OS_BUILD_PLATFORMS are created. +# +# Ideally this wouldn't be necessary and we could just set GOBIN to +# OS_OUTPUT_BINDIR but that won't work in the face of cross compilation. 'go +# install' will place binaries that match the host platform directly in $GOBIN +# while placing cross compiled binaries into `platform_arch` subdirs. This +# complicates pretty much everything else we do around packaging and such. +function os::build::place_bins() { + ( + local host_platform + host_platform=$(os::build::host_platform) + + if [[ "${OS_RELEASE_ARCHIVE-}" != "" ]]; then + os::build::version::get_vars + mkdir -p "${OS_OUTPUT_RELEASEPATH}" + fi + + os::build::export_targets "$@" + for platform in "${platforms[@]+"${platforms[@]}"}"; do + # The substitution on platform_src below will replace all slashes with + # underscores. It'll transform darwin/amd64 -> darwin_amd64. + local platform_src="/${platform//\//_}" + + # Skip this directory if the platform has no binaries. + if [[ ! -d "${OS_OUTPUT_BINPATH}/${platform}" ]]; then + continue + fi + + # Create an array of binaries to release. Append .exe variants if the platform is windows. + local -a binaries=() + for binary in "${targets[@]}"; do + binary=$(basename "$binary") + if [[ $platform == "windows/amd64" ]]; then + binaries+=("${binary}.exe") + else + binaries+=("${binary}") + fi + done + + # If no release archive was requested, we're done. + if [[ "${OS_RELEASE_ARCHIVE-}" == "" ]]; then + continue + fi + + # Create a temporary bin directory containing only the binaries marked for release. + local release_binpath + release_binpath=$(mktemp -d "openshift.release.${OS_RELEASE_ARCHIVE}.XXX") + for binary in "${binaries[@]}"; do + cp "${OS_OUTPUT_BINPATH}/${platform}/${binary}" "${release_binpath}/" + done + + # Create the release archive. 
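+ # For example, on a linux/amd64 host the friendly name computed below is + # "linux-64bit", which selects the server archive branch; a platform with + # no mapping falls through to the "No release type defined" error instead.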
+ platform="$( os::build::host_platform_friendly "${platform}" )" + if [[ ${OS_RELEASE_ARCHIVE} == "openshift-origin" ]]; then + for file in "${OS_BINARY_RELEASE_CLIENT_EXTRA[@]}"; do + cp "${file}" "${release_binpath}/" + done + if [[ $platform == "linux-64bit" ]]; then + OS_RELEASE_ARCHIVE="openshift-origin-server" os::build::archive::tar "${OS_BINARY_RELEASE_SERVER_LINUX[@]}" + elif [[ $platform == "linux-powerpc64" ]]; then + OS_RELEASE_ARCHIVE="openshift-origin-server" os::build::archive::tar "${OS_BINARY_RELEASE_SERVER_LINUX[@]}" + elif [[ $platform == "linux-arm64" ]]; then + OS_RELEASE_ARCHIVE="openshift-origin-server" os::build::archive::tar "${OS_BINARY_RELEASE_SERVER_LINUX[@]}" + elif [[ $platform == "linux-s390" ]]; then + OS_RELEASE_ARCHIVE="openshift-origin-server" os::build::archive::tar "${OS_BINARY_RELEASE_SERVER_LINUX[@]}" + else + echo "++ ERROR: No release type defined for $platform" + fi + else + if [[ $platform == "linux-64bit" || $platform == "linux-powerpc64" || $platform == "linux-arm64" || $platform == "linux-s390" ]]; then + os::build::archive::tar "./*" + else + echo "++ ERROR: No release type defined for $platform" + fi + fi + rm -rf "${release_binpath}" + done + ) +} +readonly -f os::build::place_bins + +# os::build::release_sha calculates a SHA256 checksum over the contents of the +# built release directory. +function os::build::release_sha() { + pushd "${OS_OUTPUT_RELEASEPATH}" &> /dev/null || exit 1 + find . -maxdepth 1 -type f | xargs sha256sum > CHECKSUM + popd &> /dev/null || exit 1 +} +readonly -f os::build::release_sha + +# os::build::make_openshift_binary_symlinks makes symlinks for the openshift +# binary in _output/local/bin/${platform} +function os::build::make_openshift_binary_symlinks() { + platform=$(os::build::host_platform) +} +readonly -f os::build::make_openshift_binary_symlinks + +# DEPRECATED: will be removed +function os::build::ldflag() { + local key=${1} + local val=${2} + + echo "-X ${key}=${val}" +} +readonly -f os::build::ldflag + +# os::build::require_clean_tree exits if the current Git tree is not clean. +function os::build::require_clean_tree() { + if ! git diff-index --quiet HEAD -- || test "$(git ls-files --exclude-standard --others | wc -l)" != 0; then + echo "You can't have any staged or dirty files in $(pwd) for this command." + echo "Either commit them or unstage them to continue." + exit 1 + fi +} +readonly -f os::build::require_clean_tree + +# os::build::commit_range takes one or two arguments - if the first argument is an +# integer, it is assumed to be a pull request and the local origin/pr/# branch is +# used to determine the common range with the second argument. If the first argument +# is not an integer, it is assumed to be a Git commit range and output directly. +function os::build::commit_range() { + local remote + remote="${UPSTREAM_REMOTE:-origin}" + if [[ "$1" =~ ^-?[0-9]+$ ]]; then + local target + target="$(git rev-parse "${remote}/pr/$1")" + if [[ $? -ne 0 ]]; then + echo "Branch does not exist, or you have not configured ${remote}/pr/* style branches from GitHub" 1>&2 + exit 1 + fi + + local base + base="$(git merge-base "${target}" "$2")" + if [[ $? -ne 0 ]]; then + echo "Branch has no common commits with $2" 1>&2 + exit 1 + fi + if [[ "${base}" == "${target}" ]]; then + + # DO NOT TRUST THIS CODE + merged="$(git rev-list --reverse "${target}".."$2" --ancestry-path | head -1)" + if [[ -z "${merged}" ]]; then + echo "Unable to find the commit that merged ${remote}/pr/$1" 1>&2 + exit 1 + fi + #if [[ $? 
-ne 0 ]]; then + # echo "Unable to find the merge commit for $1: ${merged}" 1>&2 + # exit 1 + #fi + echo "++ pr/$1 appears to have merged at ${merged}" 1>&2 + leftparent="$(git rev-list --parents -n 1 "${merged}" | cut -f2 -d ' ')" + if [[ $? -ne 0 ]]; then + echo "Unable to find the left-parent for the merge of $1" 1>&2 + exit 1 + fi + base="$(git merge-base "${target}" "${leftparent}")" + if [[ $? -ne 0 ]]; then + echo "Unable to find the common commit between ${leftparent} and $1" 1>&2 + exit 1 + fi + echo "${base}..${target}" + exit 0 + #echo "Branch has already been merged to upstream master, use explicit range instead" 1>&2 + #exit 1 + fi + + echo "${base}...${target}" + exit 0 + fi + + echo "$1" +} +readonly -f os::build::commit_range diff --git a/openshift-hack/lib/build/rpm.sh b/openshift-hack/lib/build/rpm.sh new file mode 100644 index 0000000000000..275602de6f067 --- /dev/null +++ b/openshift-hack/lib/build/rpm.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash + +# This library holds utilities for building RPMs from Origin. + +# os::build::rpm::get_nvra_vars determines the NVRA of the RPMs +# that would be built from the current git state. +# +# Globals: +# - OS_GIT_VERSION +# Arguments: +# - None +# Exports: +# - OS_RPM_VERSION +# - OS_RPM_RELEASE +# - OS_RPM_ARCHITECTURE +# - OS_RPM_GIT_VARS +function os::build::rpm::get_nvra_vars() { + # the package name can be overwritten but is normally 'origin' + OS_RPM_ARCHITECTURE="$(uname -i)" + + # we can extract the package version from the build version + os::build::version::get_vars + if [[ "${OS_GIT_VERSION}" =~ ^v([0-9](\.[0-9]+)*)(.*) ]]; then + OS_RPM_VERSION="${BASH_REMATCH[1]}" + metadata="${BASH_REMATCH[3]}" + else + os::log::fatal "Malformed \$OS_GIT_VERSION: ${OS_GIT_VERSION}" + fi + + # we can generate the package release from the git version metadata + # OS_GIT_VERSION will always have metadata, and will contain either + # pre-release information _and_ build metadata, or only the latter. + # Build metadata may or may not contain the number of commits past + # the last tag. If no commit number exists, we are on a tag and use 0. + # ex.
+ # -alpha.0+shasums-123-dirty + # -alpha.0+shasums-123 + # -alpha.0+shasums-dirty + # -alpha.0+shasums + # +shasums-123-dirty + # +shasums-123 + # +shasums-dirty + # +shasums + if [[ "${metadata:0:1}" == "+" ]]; then + # we only have build metadata, but need to massage it so + # we can generate a valid RPM release from it + if [[ "${metadata}" =~ ^\+([a-z0-9]{7,40})(-([0-9]+))?(-dirty)?$ ]]; then + build_sha="${BASH_REMATCH[1]}" + build_num="${BASH_REMATCH[3]:-0}" + else + os::log::fatal "Malformed git version metadata: ${metadata}" + fi + OS_RPM_RELEASE="1.${build_num}.${build_sha}" + elif [[ "${metadata:0:1}" == "-" ]]; then + # we have both build metadata and pre-release info + if [[ "${metadata}" =~ ^-([^\+]+)\+([a-z0-9]{7,40})(-([0-9]+))?(-dirty)?$ ]]; then + pre_release="${BASH_REMATCH[1]}" + build_sha="${BASH_REMATCH[2]}" + build_num="${BASH_REMATCH[4]:-0}" + else + os::log::fatal "Malformed git version metadata: ${metadata}" + fi + OS_RPM_RELEASE="0.${pre_release}.${build_num}.${build_sha}" + else + os::log::fatal "Malformed git version metadata: ${metadata}" + fi + + OS_RPM_GIT_VARS=$( os::build::version::save_vars | tr '\n' ' ' ) + + export OS_RPM_VERSION OS_RPM_RELEASE OS_RPM_ARCHITECTURE OS_RPM_GIT_VARS +} + + +# os::build::rpm::format_nvra formats the rpm NVRA vars generated by +# os::build::rpm::get_nvra_vars and will generate them if necessary +# +# Globals: +# - OS_RPM_NAME +# - OS_RPM_VERSION +# - OS_RPM_RELEASE +# - OS_RPM_ARCHITECTURE +# Arguments: +# None +# Returns: +# None +function os::build::rpm::format_nvra() { + if [[ -z "${OS_RPM_VERSION:-}" || -z "${OS_RPM_RELEASE:-}" ]]; then + os::build::rpm::get_nvra_vars + fi + if [[ -z "${OS_RPM_NAME-}" ]]; then + OS_RPM_SPECFILE="$( find "${OS_ROOT}" -name '*.spec' )" + OS_RPM_NAME="$( rpmspec -q --qf '%{name}\n' "${OS_RPM_SPECFILE}" | head -1 )" + fi + + echo "${OS_RPM_NAME}-${OS_RPM_VERSION}-${OS_RPM_RELEASE}.${OS_RPM_ARCHITECTURE}" +} diff --git a/openshift-hack/lib/build/version.sh b/openshift-hack/lib/build/version.sh new file mode 100644 index 0000000000000..ea52257486f35 --- /dev/null +++ b/openshift-hack/lib/build/version.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash + +# This library holds utility functions for determining +# product versions from Git repository state. + +# os::build::version::get_vars loads the standard version variables as +# ENV vars +function os::build::version::get_vars() { + if [[ -n "${OS_VERSION_FILE-}" ]]; then + if [[ -f "${OS_VERSION_FILE}" ]]; then + source "${OS_VERSION_FILE}" + return + fi + if [[ ! -d "${OS_ROOT}/.git" ]]; then + os::log::fatal "No version file at ${OS_VERSION_FILE}" + fi + os::log::warning "No version file at ${OS_VERSION_FILE}, falling back to git versions" + fi + os::build::version::git_vars +} +readonly -f os::build::version::get_vars + +# os::build::version::git_vars looks up the current Git vars if they have not been calculated. +function os::build::version::git_vars() { + if [[ -n "${OS_GIT_VERSION-}" ]]; then + return 0 + fi + + local git=(git --work-tree "${OS_ROOT}") + + if [[ -n ${OS_GIT_COMMIT-} ]] || OS_GIT_COMMIT=$("${git[@]}" rev-parse --short "HEAD^{commit}" 2>/dev/null); then + if [[ -z ${OS_GIT_TREE_STATE-} ]]; then + # Check if the tree is dirty. default to dirty + if git_status=$("${git[@]}" status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then + OS_GIT_TREE_STATE="clean" + else + OS_GIT_TREE_STATE="dirty" + fi + fi + # Use git describe to find the version based on annotated tags. 
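+ # (In this tree the base version is actually parsed from the kubernetes= + # value in the hyperkube Dockerfile below; git contributes only the short + # commit hash appended to it.)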
+ if [[ -n ${OS_GIT_VERSION-} ]] || OS_GIT_VERSION=$(sed -rn 's/.*io.openshift.build.versions="kubernetes=(1.[0-9]+.[0-9]+(-rc.[0-9])?)"/v\1/p' openshift-hack/images/hyperkube/Dockerfile.rhel); then + # combine GIT_COMMIT with GIT_VERSION which is being read from the above Dockerfile + OS_GIT_VERSION+="+${OS_GIT_COMMIT:0:7}" + # Try to match the "git describe" output to a regex to try to extract + # the "major" and "minor" versions and whether this is the exact tagged + # version or whether the tree is between two tagged versions. + if [[ "${OS_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)\.([0-9]+)(\.[0-9]+)*([-].*)?$ ]]; then + OS_GIT_MAJOR=${BASH_REMATCH[1]} + OS_GIT_MINOR=${BASH_REMATCH[2]} + OS_GIT_PATCH=${BASH_REMATCH[3]} + fi + + if [[ "${OS_GIT_TREE_STATE}" == "dirty" ]]; then + # git describe --dirty only considers changes to existing files, but + # that is problematic since new untracked .go files affect the build, + # so use our idea of "dirty" from git status instead. + OS_GIT_VERSION+="-dirty" + fi + fi + fi + +} +readonly -f os::build::version::git_vars + +# Saves the environment flags to $1 +function os::build::version::save_vars() { + # Set the kube vars to the os vars to ensure correct versioning + # when using rpmbuild. This is necessary to ensure the kube build + # tooling correctly sets the version of binaries when building + # from source. + cat <> "${JUNIT_REPORT_OUTPUT:-/dev/null}" ) + os::test::junit::declare_test_end + return "${return_code}" +} +readonly -f os::cmd::internal::expect_exit_code_run_grep + +# os::cmd::internal::init_tempdir initializes the temporary directory +function os::cmd::internal::init_tempdir() { + mkdir -p "${os_cmd_internal_tmpdir}" + rm -f "${os_cmd_internal_tmpdir}"/tmp_std{out,err}.log +} +readonly -f os::cmd::internal::init_tempdir + +# os::cmd::internal::describe_call determines the file:line of the latest function call made +# from outside of this file in the call stack, and the name of the function being called from +# that line, returning a string describing the call +function os::cmd::internal::describe_call() { + local cmd=$1 + local cmd_eval_func=$2 + local grep_args=${3:-} + local test_eval_func=${4:-} + + local caller_id + caller_id=$(os::cmd::internal::determine_caller) + local full_name="${caller_id}: executing '${cmd}'" + + local cmd_expectation + cmd_expectation=$(os::cmd::internal::describe_expectation "${cmd_eval_func}") + local full_name="${full_name} expecting ${cmd_expectation}" + + if [[ -n "${grep_args}" ]]; then + local text_expecting= + case "${test_eval_func}" in + "os::cmd::internal::success_func") + text_expecting="text" ;; + "os::cmd::internal::failure_func") + text_expecting="not text" ;; + esac + full_name="${full_name} and ${text_expecting} '${grep_args}'" + fi + + echo "${full_name}" +} +readonly -f os::cmd::internal::describe_call + +# os::cmd::internal::determine_caller determines the file relative to the OpenShift Origin root directory +# and line number of the function call to the outer os::cmd wrapper function +function os::cmd::internal::determine_caller() { + local call_depth= + local len_sources="${#BASH_SOURCE[@]}" + for (( i=0; i>"${os_cmd_internal_tmpout}" 2>>"${os_cmd_internal_tmperr}" || result=$? 
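+ # the `|| result=$?` capture above keeps `set -o errexit` in calling + # shells from aborting the harness when the command under test fails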
+ local result=${result:-0} # if we haven't set result yet, the command succeeded + + return "${result}" +} +readonly -f os::cmd::internal::run_collecting_output + +# os::cmd::internal::success_func determines if the input exit code denotes success +# this function returns 0 for false and 1 for true to be compatible with arithmetic tests +function os::cmd::internal::success_func() { + local exit_code=$1 + + # use a negated test to get output correct for (( )) + [[ "${exit_code}" -ne "0" ]] + return $? +} +readonly -f os::cmd::internal::success_func + +# os::cmd::internal::failure_func determines if the input exit code denotes failure +# this function returns 0 for false and 1 for true to be compatible with arithmetic tests +function os::cmd::internal::failure_func() { + local exit_code=$1 + + # use a negated test to get output correct for (( )) + [[ "${exit_code}" -eq "0" ]] + return $? +} +readonly -f os::cmd::internal::failure_func + +# os::cmd::internal::specific_code_func determines if the input exit code matches the given code +# this function returns 0 for false and 1 for true to be compatible with arithmetic tests +function os::cmd::internal::specific_code_func() { + local expected_code=$1 + local exit_code=$2 + + # use a negated test to get output correct for (( )) + [[ "${exit_code}" -ne "${expected_code}" ]] + return $? +} +readonly -f os::cmd::internal::specific_code_func + +# os::cmd::internal::get_results prints the stderr and stdout files +function os::cmd::internal::get_results() { + cat "${os_cmd_internal_tmpout}" "${os_cmd_internal_tmperr}" +} +readonly -f os::cmd::internal::get_results + +# os::cmd::internal::get_last_results prints the stderr and stdout from the last attempt +function os::cmd::internal::get_last_results() { + awk 'BEGIN { RS = "\x1e" } END { print $0 }' "${os_cmd_internal_tmpout}" + awk 'BEGIN { RS = "\x1e" } END { print $0 }' "${os_cmd_internal_tmperr}" +} +readonly -f os::cmd::internal::get_last_results + +# os::cmd::internal::mark_attempt marks the end of an attempt in the stdout and stderr log files +# this is used to make the try_until_* output more concise +function os::cmd::internal::mark_attempt() { + echo -e '\x1e' >> "${os_cmd_internal_tmpout}" + echo -e '\x1e' >> "${os_cmd_internal_tmperr}" +} +readonly -f os::cmd::internal::mark_attempt + +# os::cmd::internal::compress_output compresses an output file into timeline representation +function os::cmd::internal::compress_output() { + local logfile=$1 + + awk -f "${OS_ROOT}/hack/lib/compress.awk" "${logfile}" +} +readonly -f os::cmd::internal::compress_output + +# os::cmd::internal::print_results pretty-prints the stderr and stdout files. If attempt separators +# are present, this function returns a concise view of the stdout and stderr output files using a +# timeline format, where consecutive output lines that are the same are condensed into one line +# with a counter +function os::cmd::internal::print_results() { + if [[ -s "${os_cmd_internal_tmpout}" ]]; then + echo "Standard output from the command:" + if grep -q $'\x1e' "${os_cmd_internal_tmpout}"; then + os::cmd::internal::compress_output "${os_cmd_internal_tmpout}" + else + cat "${os_cmd_internal_tmpout}"; echo + fi + else + echo "There was no output from the command." 
+ fi + + if [[ -s "${os_cmd_internal_tmperr}" ]]; then + echo "Standard error from the command:" + if grep -q $'\x1e' "${os_cmd_internal_tmperr}"; then + os::cmd::internal::compress_output "${os_cmd_internal_tmperr}" + else + cat "${os_cmd_internal_tmperr}"; echo + fi + else + echo "There was no error output from the command." + fi +} +readonly -f os::cmd::internal::print_results + +# os::cmd::internal::assemble_causes determines from the two input booleans which part of the test +# failed and generates a nice delimited list of failure causes +function os::cmd::internal::assemble_causes() { + local cmd_succeeded=$1 + local test_succeeded=$2 + + local causes=() + if (( ! cmd_succeeded )); then + causes+=("the command returned the wrong error code") + fi + if (( ! test_succeeded )); then + causes+=("the output content test failed") + fi + + local list + list=$(printf '; %s' "${causes[@]}") + echo "${list:2}" +} +readonly -f os::cmd::internal::assemble_causes + +# os::cmd::internal::run_until_exit_code runs the provided command until the exit code test given +# succeeds or the timeout given runs out. Output from the command to be tested is suppressed unless +# either `VERBOSE=1` or the test fails. This function bypasses any error exiting settings or traps +# set by upstream callers by masking the return code of the command with the return code of setting +# the result variable on failure. +# +# Globals: +# - JUNIT_REPORT_OUTPUT +# - VERBOSE +# Arguments: +# - 1: the command to run +# - 2: command evaluation assertion to use +# - 3: timeout duration +# - 4: interval duration +# Returns: +# - 0: if all assertions met before timeout +# - 1: if timeout occurs +function os::cmd::internal::run_until_exit_code() { + local cmd=$1 + local cmd_eval_func=$2 + local duration=$3 + local interval=$4 + + local -a junit_log + + os::cmd::internal::init_tempdir + os::test::junit::declare_test_start + + local description + description=$(os::cmd::internal::describe_call "${cmd}" "${cmd_eval_func}") + local duration_seconds + duration_seconds=$(echo "scale=3; $(( duration )) / 1000" | bc | xargs printf '%5.3f') + local description="${description}; re-trying every ${interval}s until completion or ${duration_seconds}s" + local preamble="Running ${description}..." + echo "${preamble}" + # for ease of parsing, we want the entire declaration on one line, so we replace '\n' with ';' + junit_log+=( "${description//$'\n'/;}" ) + + local start_time + start_time=$(os::cmd::internal::seconds_since_epoch) + + local deadline=$(( $(date +%s000) + duration )) + local cmd_succeeded=0 + while [ "$(date +%s000)" -lt $deadline ]; do + local cmd_result + cmd_result=$( os::cmd::internal::run_collecting_output "${cmd}"; echo $? ) + cmd_succeeded=$( ${cmd_eval_func} "${cmd_result}"; echo $? 
) + if (( cmd_succeeded )); then + break + fi + sleep "${interval}" + os::cmd::internal::mark_attempt + done + + local end_time + end_time=$(os::cmd::internal::seconds_since_epoch) + local time_elapsed + time_elapsed=$(echo "scale=9; ${end_time} - ${start_time}" | bc | xargs printf '%5.3f') # in decimal seconds, we need leading zeroes for parsing later + + # clear the preamble so we can print out the success or error message + os::text::clear_string "${preamble}" + + local return_code + if (( cmd_succeeded )); then + os::text::print_green "SUCCESS after ${time_elapsed}s: ${description}" + junit_log+=( "SUCCESS after ${time_elapsed}s: ${description//$'\n'/;}" ) + + if [[ -n ${VERBOSE-} ]]; then + os::cmd::internal::print_results + fi + return_code=0 + else + os::text::print_red_bold "FAILURE after ${time_elapsed}s: ${description}: the command timed out" + junit_log+=( "FAILURE after ${time_elapsed}s: ${description//$'\n'/;}: the command timed out" ) + + os::text::print_red "$(os::cmd::internal::print_results)" + return_code=1 + fi + + junit_log+=( "$(os::cmd::internal::print_results)" ) + ( IFS=$'\n'; echo "${junit_log[*]}" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" ) + os::test::junit::declare_test_end + return "${return_code}" +} +readonly -f os::cmd::internal::run_until_exit_code + +# os::cmd::internal::run_until_text runs the provided command until the assertion function succeeds with +# the given text on the command output or the timeout given runs out. This can be used to run until the +# output does or does not contain some text. Output from the command to be tested is suppressed unless +# either `VERBOSE=1` or the test fails. This function bypasses any error exiting settings or traps +# set by upstream callers by masking the return code of the command with the return code of setting +# the result variable on failure. +# +# Globals: +# - JUNIT_REPORT_OUTPUT +# - VERBOSE +# Arguments: +# - 1: the command to run +# - 2: text to test for +# - 3: text assertion to use +# - 4: timeout duration +# - 5: interval duration +# Returns: +# - 0: if all assertions met before timeout +# - 1: if timeout occurs +function os::cmd::internal::run_until_text() { + local cmd=$1 + local text=$2 + local test_eval_func=${3:-os::cmd::internal::success_func} + local duration=$4 + local interval=$5 + + local -a junit_log + + os::cmd::internal::init_tempdir + os::test::junit::declare_test_start + + local description + description=$(os::cmd::internal::describe_call "${cmd}" "" "${text}" "${test_eval_func}") + local duration_seconds + duration_seconds=$(echo "scale=3; $(( duration )) / 1000" | bc | xargs printf '%5.3f') + local description="${description}; re-trying every ${interval}s until completion or ${duration_seconds}s" + local preamble="Running ${description}..." + echo "${preamble}" + # for ease of parsing, we want the entire declaration on one line, so we replace '\n' with ';' + junit_log+=( "${description//$'\n'/;}" ) + + local start_time + start_time=$(os::cmd::internal::seconds_since_epoch) + + local deadline + deadline=$(( $(date +%s000) + duration )) + local test_succeeded=0 + while [ "$(date +%s000)" -lt $deadline ]; do + local cmd_result= + cmd_result=$( os::cmd::internal::run_collecting_output "${cmd}"; echo $? ) + local test_result + test_result=$( os::cmd::internal::run_collecting_output 'grep -Eq "'"${text}"'" <(os::cmd::internal::get_last_results)'; echo $? ) + test_succeeded=$( ${test_eval_func} "${test_result}"; echo $? 
) + + if (( test_succeeded )); then + break + fi + sleep "${interval}" + os::cmd::internal::mark_attempt + done + + local end_time + end_time=$(os::cmd::internal::seconds_since_epoch) + local time_elapsed + time_elapsed=$(echo "scale=9; ${end_time} - ${start_time}" | bc | xargs printf '%5.3f') # in decimal seconds, we need leading zeroes for parsing later + + # clear the preamble so we can print out the success or error message + os::text::clear_string "${preamble}" + + local return_code + if (( test_succeeded )); then + os::text::print_green "SUCCESS after ${time_elapsed}s: ${description}" + junit_log+=( "SUCCESS after ${time_elapsed}s: ${description//$'\n'/;}" ) + + if [[ -n ${VERBOSE-} ]]; then + os::cmd::internal::print_results + fi + return_code=0 + else + os::text::print_red_bold "FAILURE after ${time_elapsed}s: ${description}: the command timed out" + junit_log+=( "FAILURE after ${time_elapsed}s: ${description//$'\n'/;}: the command timed out" ) + + os::text::print_red "$(os::cmd::internal::print_results)" + return_code=1 + fi + + junit_log+=( "$(os::cmd::internal::print_results)" ) + ( IFS=$'\n'; echo "${junit_log[*]}" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" ) + os::test::junit::declare_test_end + return "${return_code}" +} +readonly -f os::cmd::internal::run_until_text diff --git a/openshift-hack/lib/constants.sh b/openshift-hack/lib/constants.sh new file mode 100755 index 0000000000000..3552d53115d68 --- /dev/null +++ b/openshift-hack/lib/constants.sh @@ -0,0 +1,324 @@ +#!/usr/bin/env bash + +# This script provides constants for the Golang binary build process + +readonly OS_GO_PACKAGE=github.com/openshift/origin + +readonly OS_BUILD_ENV_GOLANG="${OS_BUILD_ENV_GOLANG:-1.15}" +readonly OS_BUILD_ENV_IMAGE="${OS_BUILD_ENV_IMAGE:-openshift/origin-release:golang-${OS_BUILD_ENV_GOLANG}}" +readonly OS_REQUIRED_GO_VERSION="go${OS_BUILD_ENV_GOLANG}" +readonly OS_GLIDE_MINOR_VERSION="13" +readonly OS_REQUIRED_GLIDE_VERSION="0.$OS_GLIDE_MINOR_VERSION" + +readonly OS_GOFLAGS_TAGS="include_gcs include_oss containers_image_openpgp" +readonly OS_GOFLAGS_TAGS_LINUX_AMD64="gssapi selinux" +readonly OS_GOFLAGS_TAGS_LINUX_S390X="gssapi selinux" +readonly OS_GOFLAGS_TAGS_LINUX_ARM64="gssapi selinux" +readonly OS_GOFLAGS_TAGS_LINUX_PPC64LE="gssapi selinux" + +readonly OS_OUTPUT_BASEPATH="${OS_OUTPUT_BASEPATH:-_output}" +readonly OS_BASE_OUTPUT="${OS_ROOT}/${OS_OUTPUT_BASEPATH}" +readonly OS_OUTPUT_SCRIPTPATH="${OS_OUTPUT_SCRIPTPATH:-"${OS_BASE_OUTPUT}/scripts"}" + +readonly OS_OUTPUT_SUBPATH="${OS_OUTPUT_SUBPATH:-${OS_OUTPUT_BASEPATH}/local}" +readonly OS_OUTPUT="${OS_ROOT}/${OS_OUTPUT_SUBPATH}" +readonly OS_OUTPUT_RELEASEPATH="${OS_OUTPUT}/releases" +readonly OS_OUTPUT_RPMPATH="${OS_OUTPUT_RELEASEPATH}/rpms" +readonly OS_OUTPUT_BINPATH="${OS_OUTPUT}/bin" +readonly OS_OUTPUT_PKGDIR="${OS_OUTPUT}/pkgdir" + +readonly OS_IMAGE_COMPILE_TARGETS_LINUX=( + vendor/k8s.io/kubernetes/cmd/kube-apiserver + vendor/k8s.io/kubernetes/cmd/kube-controller-manager + vendor/k8s.io/kubernetes/cmd/kube-scheduler + vendor/k8s.io/kubernetes/cmd/kubelet +) +readonly OS_SCRATCH_IMAGE_COMPILE_TARGETS_LINUX=( + "" +) +readonly OS_IMAGE_COMPILE_BINARIES=("${OS_SCRATCH_IMAGE_COMPILE_TARGETS_LINUX[@]##*/}" "${OS_IMAGE_COMPILE_TARGETS_LINUX[@]##*/}") + +readonly OS_GOVET_BLACKLIST=( +) + +#If you update this list, be sure to get the images/origin/Dockerfile +readonly OS_BINARY_RELEASE_SERVER_LINUX=( + './*' +) +readonly OS_BINARY_RELEASE_CLIENT_EXTRA=( + ${OS_ROOT}/README.md + ${OS_ROOT}/LICENSE +) + +# os::build::get_product_vars 
exports variables that we expect to change +# depending on the distribution of Origin +function os::build::get_product_vars() { + export OS_BUILD_LDFLAGS_IMAGE_PREFIX="${OS_IMAGE_PREFIX:-"openshift/origin"}" + export OS_BUILD_LDFLAGS_DEFAULT_IMAGE_STREAMS="${OS_BUILD_LDFLAGS_DEFAULT_IMAGE_STREAMS:-"centos7"}" +} + +# os::build::ldflags calculates the -ldflags argument for building OpenShift +function os::build::ldflags() { + # Run this in a subshell to prevent settings/variables from leaking. + set -o errexit + set -o nounset + set -o pipefail + + cd "${OS_ROOT}" + + os::build::version::get_vars + os::build::get_product_vars + + local buildDate="$(date -u +'%Y-%m-%dT%H:%M:%SZ')" + + declare -a ldflags=( + "-s" + "-w" + ) + + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.majorFromGit" "${OS_GIT_MAJOR}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.minorFromGit" "${OS_GIT_MINOR}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.versionFromGit" "${OS_GIT_VERSION}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.commitFromGit" "${OS_GIT_COMMIT}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.gitTreeState" "${OS_GIT_TREE_STATE}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.buildDate" "${buildDate}")) + + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/component-base/version.gitMajor" "${KUBE_GIT_MAJOR}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/component-base/version.gitMinor" "${KUBE_GIT_MINOR}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/component-base/version.gitCommit" "${OS_GIT_COMMIT}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/component-base/version.gitVersion" "${KUBE_GIT_VERSION}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/component-base/version.buildDate" "${buildDate}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/component-base/version.gitTreeState" "clean")) + + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.gitMajor" "${KUBE_GIT_MAJOR}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.gitMinor" "${KUBE_GIT_MINOR}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.gitCommit" "${OS_GIT_COMMIT}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.gitVersion" "${KUBE_GIT_VERSION}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.buildDate" "${buildDate}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.gitTreeState" "clean") +) + + # The -ldflags parameter takes a single string, so join the output. + echo "${ldflags[*]-}" +} +readonly -f os::build::ldflags + +# os::util::list_go_src_files lists files we consider part of our project +# source code, useful for tools that iterate over source to provide vet- +# ting or linting, etc. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# None +function os::util::list_go_src_files() { + find . 
-not \( \ + \( \ + -wholename './_output' \ + -o -wholename './.*' \ + -o -wholename './pkg/assets/bindata.go' \ + -o -wholename './pkg/assets/*/bindata.go' \ + -o -wholename './pkg/oc/clusterup/manifests/bindata.go' \ + -o -wholename './openshift.local.*' \ + -o -wholename './test/extended/testdata/bindata.go' \ + -o -wholename '*/vendor/*' \ + -o -wholename './assets/bower_components/*' \ + \) -prune \ + \) -name '*.go' | sort -u +} +readonly -f os::util::list_go_src_files + +# os::util::list_go_src_dirs lists dirs under origin/ and cmd/, excluding +# doc.go, useful for tools that iterate over source to provide vetting or +# linting, or for godep-save etc. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# None +function os::util::list_go_src_dirs() { + go list -e ./... | grep -Ev "/(third_party|vendor|staging|clientset_generated)/" | LC_ALL=C sort -u +} +readonly -f os::util::list_go_src_dirs + +# os::util::list_go_deps outputs the list of dependencies for the project. +function os::util::list_go_deps() { + go list -f '{{.ImportPath}}{{.Imports}}' ./test/... ./pkg/... ./cmd/... ./vendor/k8s.io/... | tr '[]' ' ' | + sed -e 's|github.com/openshift/origin/vendor/||g' | + sed -e 's|k8s.io/kubernetes/staging/src/||g' +} + +# os::util::list_test_packages_under lists all packages containing Golang test files that we +# want to run as unit tests under the given base dir in the source tree +function os::util::list_test_packages_under() { + local basedir=$* + + # we do not quote ${basedir} to allow for multiple arguments to be passed in as well as to allow for + # arguments that use expansion, e.g. paths containing brace expansion or wildcards + find ${basedir} -not \( \ + \( \ + -path 'vendor' \ + -o -path '*_output' \ + -o -path '*.git' \ + -o -path '*openshift.local.*' \ + -o -path '*vendor/*' \ + -o -path '*assets/node_modules' \ + -o -path '*test/*' \ + -o -path '*pkg/proxy' \ + -o -path '*k8s.io/kubernetes/cluster/gce*' \ + \) -prune \ + \) -name '*_test.go' | xargs -n1 dirname | sort -u | xargs -n1 printf "${OS_GO_PACKAGE}/%s\n" + + local kubernetes_path="vendor/k8s.io/kubernetes" + + if [[ -n "${TEST_KUBE-}" ]]; then + # we need to find all of the kubernetes test suites, excluding those we directly whitelisted before and the end-to-end suite; + # cmd is excluded because it was not run before the move to glide and it constantly flakes + # the forked etcd packages are used only by the gce etcd containers + find -L vendor/k8s.io/{api,apimachinery,apiserver,client-go,kube-aggregator,kubernetes} -not \( \ + \( \ + -path "${kubernetes_path}/staging" \ + -o -path "${kubernetes_path}/cmd" \ + -o -path "${kubernetes_path}/test" \ + -o -path "${kubernetes_path}/third_party/forked/etcd*" \ + -o -path "${kubernetes_path}/cluster/gce" \ + \) -prune \ + \) -name '*_test.go' | cut -f 2- -d / | xargs -n1 dirname | sort -u | xargs -n1 printf "${OS_GO_PACKAGE}/vendor/%s\n" + else + echo "${OS_GO_PACKAGE}/vendor/k8s.io/api/..." + echo "${OS_GO_PACKAGE}/vendor/k8s.io/kubernetes/pkg/api/..." + echo "${OS_GO_PACKAGE}/vendor/k8s.io/kubernetes/pkg/apis/..." + fi +} +readonly -f os::util::list_test_packages_under + +# Generates the .syso file used to add compile-time VERSIONINFO metadata to the +# Windows binary.
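+# The function below requires the `goversioninfo` tool on PATH; for a +# windows/amd64 build it writes oc.syso into the vendored oc command +# directory, and the Go linker links .syso files found in a package +# directory into the binary automatically.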
+function os::build::generate_windows_versioninfo() { + os::build::version::get_vars + local major="${OS_GIT_MAJOR}" + local minor="${OS_GIT_MINOR%+}" + local patch="${OS_GIT_PATCH}" + local windows_versioninfo_file=`mktemp --suffix=".versioninfo.json"` + cat <<EOF >"${windows_versioninfo_file}" +{ + "FixedFileInfo": + { + "FileVersion": { + "Major": ${major}, + "Minor": ${minor}, + "Patch": ${patch} + }, + "ProductVersion": { + "Major": ${major}, + "Minor": ${minor}, + "Patch": ${patch} + }, + "FileFlagsMask": "3f", + "FileFlags ": "00", + "FileOS": "040004", + "FileType": "01", + "FileSubType": "00" + }, + "StringFileInfo": + { + "Comments": "", + "CompanyName": "Red Hat, Inc.", + "FileDescription": "openshift client", + "FileVersion": "${OS_GIT_VERSION}", + "InternalName": "oc", + "LegalCopyright": "© Red Hat, Inc. Licensed under the Apache License, Version 2.0", + "LegalTrademarks": "", + "OriginalFilename": "oc.exe", + "PrivateBuild": "", + "ProductName": "OpenShift Client", + "ProductVersion": "${OS_GIT_VERSION}", + "SpecialBuild": "" + }, + "VarFileInfo": + { + "Translation": { + "LangID": "0409", + "CharsetID": "04B0" + } + } +} +EOF + goversioninfo -o ${OS_ROOT}/vendor/github.com/openshift/oc/cmd/oc/oc.syso ${windows_versioninfo_file} +} +readonly -f os::build::generate_windows_versioninfo + +# Removes the .syso file used to add compile-time VERSIONINFO metadata to the +# Windows binary. +function os::build::clean_windows_versioninfo() { + rm ${OS_ROOT}/vendor/github.com/openshift/oc/cmd/oc/oc.syso +} +readonly -f os::build::clean_windows_versioninfo + +# OS_ALL_IMAGES is the list of images built by os::build::images. +readonly OS_ALL_IMAGES=( + origin-hyperkube + origin-tests +) + +# os::build::check_binaries ensures that binary sizes do not grow without approval. +function os::build::check_binaries() { + platform=$(os::build::host_platform) + if [[ "${platform}" != "linux/amd64" && "${platform}" != "darwin/amd64" ]]; then + return 0 + fi + duexe="du" + + # On macOS, the 'du' binary does not provide the --apparent-size flag. However, Homebrew + # provides GNU coreutils, which includes the 'gdu' binary, the equivalent of Linux du. + # For now, if the 'gdu' binary is not installed, print an annoying warning and don't check the + # binary size (the CI will capture possible violations anyway). + if [[ "${platform}" == "darwin/amd64" ]]; then + duexe=$(which gdu || true) + if [[ -z "${duexe}" ]]; then + os::log::warning "Unable to locate 'gdu' binary to determine size of the binary. Please install it using: 'brew install coreutils'" + return 0 + fi + fi + + if [[ -f "${OS_OUTPUT_BINPATH}/${platform}/pod" ]]; then + size=$($duexe --apparent-size -m "${OS_OUTPUT_BINPATH}/${platform}/pod" | cut -f 1) + if [[ "${size}" -gt "2" ]]; then + os::log::fatal "pod binary has grown substantially to ${size}. You must have approval before bumping this limit." + fi + fi +} + +# os::build::images builds all images in this repo.
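+# For example, with the default OS_IMAGE_PREFIX of "openshift/origin" this +# produces the openshift/origin-hyperkube and openshift/origin-tests images +# named in OS_ALL_IMAGES above.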
+function os::build::images() { + # Create link to file if the FS supports hardlinks, otherwise copy the file + function ln_or_cp { + local src_file=$1 + local dst_dir=$2 + if os::build::archive::internal::is_hardlink_supported "${dst_dir}" ; then + ln -f "${src_file}" "${dst_dir}" + else + cp -pf "${src_file}" "${dst_dir}" + fi + } + + # determine the correct tag prefix + tag_prefix="${OS_IMAGE_PREFIX:-"openshift/origin"}" + + # images that depend on "${tag_prefix}-source" or "${tag_prefix}-base" + ( os::build::image "${tag_prefix}-hyperkube" images/hyperkube ) & + + for i in $(jobs -p); do wait "$i"; done + + # images that depend on "${tag_prefix}-cli" or hyperkube + ( os::build::image "${tag_prefix}-tests" images/tests ) & + + for i in $(jobs -p); do wait "$i"; done +} +readonly -f os::build::images diff --git a/openshift-hack/lib/deps.sh b/openshift-hack/lib/deps.sh new file mode 100644 index 0000000000000..6a9009823de1e --- /dev/null +++ b/openshift-hack/lib/deps.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# os::deps::path_with_shellcheck returns a path that includes shellcheck. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# The path that includes shellcheck. +function os::deps::path_with_shellcheck() { + local path="${PATH}" + if ! which shellcheck &> /dev/null; then + local shellcheck_path="${TMPDIR:-/tmp}/shellcheck" + mkdir -p "${shellcheck_path}" + pushd "${shellcheck_path}" > /dev/null || exit 1 + # This version needs to match that required by + # hack/verify-shellcheck.sh to avoid the use of docker. + local version="v0.7.0" + local tar_file="shellcheck-${version}.linux.x86_64.tar.xz" + curl -LO "https://github.com/koalaman/shellcheck/releases/download/${version}/${tar_file}" + tar xf "${tar_file}" + path="${PATH}:$(pwd)/shellcheck-${version}" + popd > /dev/null || exit 1 + fi + echo "${path}" +} +readonly -f os::deps::path_with_shellcheck diff --git a/openshift-hack/lib/init.sh b/openshift-hack/lib/init.sh new file mode 100755 index 0000000000000..00321b0ff7137 --- /dev/null +++ b/openshift-hack/lib/init.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +# This script is meant to be the entrypoint for OpenShift Bash scripts to import all of the support +# libraries at once in order to make Bash script preambles as minimal as possible. This script recur- +# sively `source`s *.sh files in this directory tree. As such, no files should be `source`ed outside +# of this script to ensure that we do not attempt to overwrite read-only variables. + +set -o errexit +set -o nounset +set -o pipefail + +OS_SCRIPT_START_TIME="$( date +%s )"; export OS_SCRIPT_START_TIME + +# os::util::absolute_path returns the absolute path to the directory provided +function os::util::absolute_path() { + local relative_path="$1" + local absolute_path + + pushd "${relative_path}" >/dev/null + relative_path="$( pwd )" + if [[ -h "${relative_path}" ]]; then + absolute_path="$( readlink "${relative_path}" )" + else + absolute_path="${relative_path}" + fi + popd >/dev/null + + echo "${absolute_path}" +} +readonly -f os::util::absolute_path + +# find the absolute path to the root of the Origin source tree +init_source="$( dirname "${BASH_SOURCE[0]}" )/../.." 
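+# (this file lives at openshift-hack/lib/init.sh, so two directory levels up +# from it is the repository root)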
+OS_ROOT="$( os::util::absolute_path "${init_source}" )" +export OS_ROOT +cd "${OS_ROOT}" + +for library_file in $( find "${OS_ROOT}/openshift-hack/lib" -type f -name '*.sh' -not -path '*/openshift-hack/lib/init.sh' ); do + source "${library_file}" +done + +unset library_files library_file init_source + +# all of our Bash scripts need to have the stacktrace +# handler installed to deal with errors +os::log::stacktrace::install + +# All of our Bash scripts need to have access to the +# binaries that we build so we don't have to find +# them before every invocation. +os::util::environment::update_path_var + +if [[ -z "${OS_TMP_ENV_SET-}" ]]; then + # if this file is run via 'source', then $0 will be "-bash" and won't work with basename + if [[ "${0}" =~ .*\.sh ]]; then + os::util::environment::setup_tmpdir_vars "$( basename "${0}" ".sh" )" + else + os::util::environment::setup_tmpdir_vars "shell" + fi +fi + +# Allow setting $JUNIT_REPORT to toggle output behavior +if [[ -n "${JUNIT_REPORT:-}" ]]; then + export JUNIT_REPORT_OUTPUT="${LOG_DIR}/raw_test_output.log" +fi + +# Use the go version from the system +export FORCE_HOST_GO=1 diff --git a/openshift-hack/lib/log/output.sh b/openshift-hack/lib/log/output.sh new file mode 100644 index 0000000000000..103fa1ff1bee5 --- /dev/null +++ b/openshift-hack/lib/log/output.sh @@ -0,0 +1,104 @@ +#!/usr/bin/env bash + +# This file contains functions used for writing log messages +# to stdout and stderr from scripts while they run. + +# os::log::info writes the message to stdout. +# +# Arguments: +# - all: message to write +function os::log::info() { + local message; message="$( os::log::internal::prefix_lines "[INFO]" "$*" )" + os::log::internal::to_logfile "${message}" + echo "${message}" +} +readonly -f os::log::info + +# os::log::warning writes the message to stderr. +# A warning indicates something went wrong but +# not so wrong that we cannot recover. +# +# Arguments: +# - all: message to write +function os::log::warning() { + local message; message="$( os::log::internal::prefix_lines "[WARNING]" "$*" )" + os::log::internal::to_logfile "${message}" + os::text::print_yellow "${message}" 1>&2 +} +readonly -f os::log::warning + +# os::log::error writes the message to stderr. +# An error indicates that something went wrong +# and we will most likely fail after this. +# +# Arguments: +# - all: message to write +function os::log::error() { + local message; message="$( os::log::internal::prefix_lines "[ERROR]" "$*" )" + os::log::internal::to_logfile "${message}" + os::text::print_red "${message}" 1>&2 +} +readonly -f os::log::error + +# os::log::fatal writes the message to stderr and +# returns a non-zero code to force a process exit. +# A fatal error indicates that there is no chance +# of recovery. +# +# Arguments: +# - all: message to write +function os::log::fatal() { + local message; message="$( os::log::internal::prefix_lines "[FATAL]" "$*" )" + os::log::internal::to_logfile "${message}" + os::text::print_red "${message}" 1>&2 + exit 1 +} +readonly -f os::log::fatal + +# os::log::debug writes the message to stderr if +# the ${OS_DEBUG} variable is set. 
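+# For example (script name illustrative), `OS_DEBUG=1 ./run-tests.sh` would +# surface these messages on stderr; without it they are still recorded in +# ${LOG_DIR}/scripts.log by os::log::internal::to_logfile.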
+# +# Globals: +# - OS_DEBUG +# Arguments: +# - all: message to write +function os::log::debug() { + local message; message="$( os::log::internal::prefix_lines "[DEBUG]" "$*" )" + os::log::internal::to_logfile "${message}" + if [[ -n "${OS_DEBUG:-}" ]]; then + os::text::print_blue "${message}" 1>&2 + fi +} +readonly -f os::log::debug + +# os::log::internal::to_logfile makes a best-effort +# attempt to write the message to the script logfile +# +# Globals: +# - LOG_DIR +# Arguments: +# - all: message to write +function os::log::internal::to_logfile() { + if [[ -n "${LOG_DIR:-}" && -d "${LOG_DIR-}" ]]; then + echo "$*" >>"${LOG_DIR}/scripts.log" + fi +} + +# os::log::internal::prefix_lines prints out the +# original content with the given prefix at the +# start of every line. +# +# Arguments: +# - 1: prefix for lines +# - 2: content to prefix +function os::log::internal::prefix_lines() { + local prefix="$1" + local content="$2" + + local old_ifs="${IFS}" + IFS=$'\n' + for line in ${content}; do + echo "${prefix} ${line}" + done + IFS="${old_ifs}" +} \ No newline at end of file diff --git a/openshift-hack/lib/log/stacktrace.sh b/openshift-hack/lib/log/stacktrace.sh new file mode 100644 index 0000000000000..e9915efb6342f --- /dev/null +++ b/openshift-hack/lib/log/stacktrace.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash +# +# This library contains an implementation of a stack trace for Bash scripts. + +# os::log::stacktrace::install installs the stacktrace as a handler for the ERR signal if one +# has not already been installed and sets `set -o errtrace` in order to propagate the handler +# If the ERR trap is not initialized, installing this plugin will initialize it. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# - export OS_USE_STACKTRACE +function os::log::stacktrace::install() { + # setting 'errtrace' propagates our ERR handler to functions, expansions and subshells + set -o errtrace + + # OS_USE_STACKTRACE is read by os::util::trap at runtime to request a stacktrace + export OS_USE_STACKTRACE=true + + os::util::trap::init_err +} +readonly -f os::log::stacktrace::install + +# os::log::stacktrace::print prints the stacktrace and exits with the return code from the script that +# called for a stack trace. This function will always return 0 if it is not handling the signal, and if it +# is handling the signal, this function will always `exit`, not return, the return code it receives as +# its first argument. 
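+# The ERR trap installed via os::util::trap::init_err is expected to supply +# the arguments described below: the failing command's return code, the +# command text, and whether errexit was in effect.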
+# +# Globals: +# - BASH_SOURCE +# - BASH_LINENO +# - FUNCNAME +# Arguments: +# - 1: the return code of the command in the script that generated the ERR signal +# - 2: the last command that ran before handlers were invoked +# - 3: whether or not `set -o errexit` was set in the script that generated the ERR signal +# Returns: +# None +function os::log::stacktrace::print() { + local return_code=$1 + local last_command=$2 + local errexit_set=${3:-} + + if [[ "${return_code}" = "0" ]]; then + # we're not supposed to respond when no error has occurred + return 0 + fi + + if [[ -z "${errexit_set}" ]]; then + # if errexit wasn't set in the shell when the ERR signal was issued, then we can ignore the signal + # as this is not cause for failure + return 0 + fi + + # dump the entire stack for debugging purposes + os::log::debug "$( os::util::repository_relative_path "${BASH_SOURCE[0]}:${LINENO}: ${BASH_COMMAND}" )" + for (( i = 0; i < ${#BASH_LINENO[@]}; i++ )); do + os::log::debug "$( os::util::repository_relative_path "${BASH_SOURCE[$i+1]:-"$( os::util::repository_relative_path "$0" )"}" ):${BASH_LINENO[$i]}: ${FUNCNAME[$i]}" + done + + # iterate backwards through the stack until we leave library files, so we can be sure we start logging + # actual script code and not this handler's call + local stack_begin_index + for (( stack_begin_index = 0; stack_begin_index < ${#BASH_SOURCE[@]}; stack_begin_index++ )); do + if [[ ! "${BASH_SOURCE[${stack_begin_index}]}" =~ hack/lib/(log/stacktrace|util/trap)\.sh ]]; then + break + fi + done + + local preamble_finished + local stack_index=1 + local i + for (( i = stack_begin_index; i < ${#BASH_SOURCE[@]}; i++ )); do + local bash_source + bash_source="$( os::util::repository_relative_path "${BASH_SOURCE[$i]}" )" + if [[ -z "${preamble_finished:-}" ]]; then + preamble_finished=true + os::log::error "${bash_source}:${BASH_LINENO[$i-1]}: \`${last_command}\` exited with status ${return_code}." >&2 + exit "${return_code}" + fi + stack_index=$(( stack_index + 1 )) + done + + # we know we're the privileged handler in this chain, so we can safely exit the shell without + # starving another handler of the privilege of reacting to this signal + os::log::info " Exiting with code ${return_code}." >&2 + exit "${return_code}" +} +readonly -f os::log::stacktrace::print diff --git a/openshift-hack/lib/test/junit.sh b/openshift-hack/lib/test/junit.sh new file mode 100644 index 0000000000000..18bb3ee857d2c --- /dev/null +++ b/openshift-hack/lib/test/junit.sh @@ -0,0 +1,202 @@ +#!/usr/bin/env bash +# This utility file contains functions that format test output to be parsed into jUnit XML + +# os::test::junit::declare_suite_start prints a message declaring the start of a test suite +# Any number of suites can be in flight at any time, so there is no failure condition for this +# script based on the number of suites in flight. 
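+# Illustrative usage (suite name hypothetical): +# os::test::junit::declare_suite_start "cmd/basic" +# ... emit test cases ... +# os::test::junit::declare_suite_end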
+# +# Globals: +# - JUNIT_REPORT_OUTPUT +# - NUM_OS_JUNIT_SUITES_IN_FLIGHT +# Arguments: +# - 1: the suite name that is starting +# Returns: +# - increment NUM_OS_JUNIT_SUITES_IN_FLIGHT +function os::test::junit::declare_suite_start() { + local suite_name=$1 + local num_suites=${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0} + + echo "=== BEGIN TEST SUITE github.com/openshift/origin/test/${suite_name} ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" + NUM_OS_JUNIT_SUITES_IN_FLIGHT=$(( num_suites + 1 )) + export NUM_OS_JUNIT_SUITES_IN_FLIGHT +} +readonly -f os::test::junit::declare_suite_start + +# os::test::junit::declare_suite_end prints a message declaring the end of a test suite +# If there aren't any suites in flight, this function will fail. +# +# Globals: +# - JUNIT_REPORT_OUTPUT +# - NUM_OS_JUNIT_SUITES_IN_FLIGHT +# Arguments: +# - 1: the suite name that is starting +# Returns: +# - export/decrement NUM_OS_JUNIT_SUITES_IN_FLIGHT +function os::test::junit::declare_suite_end() { + local num_suites=${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0} + if [[ "${num_suites}" -lt "1" ]]; then + # we can't end a suite if none have been started yet + echo "[ERROR] jUnit suite marker could not be placed, expected suites in flight, got ${num_suites}" + return 1 + fi + + echo "=== END TEST SUITE ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" + NUM_OS_JUNIT_SUITES_IN_FLIGHT=$(( num_suites - 1 )) + export NUM_OS_JUNIT_SUITES_IN_FLIGHT +} +readonly -f os::test::junit::declare_suite_end + +# os::test::junit::declare_test_start prints a message declaring the start of a test case +# If there is already a test marked as being in flight, this function will fail. +# +# Globals: +# - JUNIT_REPORT_OUTPUT +# - NUM_OS_JUNIT_TESTS_IN_FLIGHT +# Arguments: +# None +# Returns: +# - increment NUM_OS_JUNIT_TESTS_IN_FLIGHT +function os::test::junit::declare_test_start() { + local num_tests=${NUM_OS_JUNIT_TESTS_IN_FLIGHT:-0} + if [[ "${num_tests}" -ne "0" ]]; then + # someone's declaring the starting of a test when a test is already in flight + echo "[ERROR] jUnit test marker could not be placed, expected no tests in flight, got ${num_tests}" + return 1 + fi + + local num_suites=${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0} + if [[ "${num_suites}" -lt "1" ]]; then + # we can't end a test if no suites are in flight + echo "[ERROR] jUnit test marker could not be placed, expected suites in flight, got ${num_suites}" + return 1 + fi + + echo "=== BEGIN TEST CASE ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" + NUM_OS_JUNIT_TESTS_IN_FLIGHT=$(( num_tests + 1 )) + export NUM_OS_JUNIT_TESTS_IN_FLIGHT +} +readonly -f os::test::junit::declare_test_start + +# os::test::junit::declare_test_end prints a message declaring the end of a test case +# If there is no test marked as being in flight, this function will fail. 
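+# The BEGIN/END TEST CASE markers written by this pair of functions are what +# os::test::junit::generate_report later greps for when deciding whether a +# jUnit XML report can be produced.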
+# +# Globals: +# - JUNIT_REPORT_OUTPUT +# - NUM_OS_JUNIT_TESTS_IN_FLIGHT +# Arguments: +# None +# Returns: +# - decrement NUM_OS_JUNIT_TESTS_IN_FLIGHT +function os::test::junit::declare_test_end() { + local num_tests=${NUM_OS_JUNIT_TESTS_IN_FLIGHT:-0} + if [[ "${num_tests}" -ne "1" ]]; then + # someone's declaring the end of a test when a test is not in flight + echo "[ERROR] jUnit test marker could not be placed, expected one test in flight, got ${num_tests}" + return 1 + fi + + echo "=== END TEST CASE ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" + NUM_OS_JUNIT_TESTS_IN_FLIGHT=$(( num_tests - 1 )) + export NUM_OS_JUNIT_TESTS_IN_FLIGHT +} +readonly -f os::test::junit::declare_test_end + +# os::test::junit::check_test_counters checks that we do not have any test suites or test cases in flight +# This function should be called at the very end of any test script using jUnit markers to make sure no error in +# marking has occurred. +# +# Globals: +# - NUM_OS_JUNIT_SUITES_IN_FLIGHT +# - NUM_OS_JUNIT_TESTS_IN_FLIGHT +# Arguments: +# None +# Returns: +# None +function os::test::junit::check_test_counters() { + if [[ "${NUM_OS_JUNIT_SUITES_IN_FLIGHT-}" -ne "0" ]]; then + echo "[ERROR] Expected no test suites to be marked as in-flight at the end of testing, got ${NUM_OS_JUNIT_SUITES_IN_FLIGHT-}" + return 1 + elif [[ "${NUM_OS_JUNIT_TESTS_IN_FLIGHT-}" -ne "0" ]]; then + echo "[ERROR] Expected no test cases to be marked as in-flight at the end of testing, got ${NUM_OS_JUNIT_TESTS_IN_FLIGHT-}" + return 1 + fi +} +readonly -f os::test::junit::check_test_counters + +# os::test::junit::reconcile_output appends the necessary suite and test end statements to the jUnit output file +# in order to ensure that the file is in a consistent state to allow for parsing +# +# Globals: +# - NUM_OS_JUNIT_SUITES_IN_FLIGHT +# - NUM_OS_JUNIT_TESTS_IN_FLIGHT +# Arguments: +# None +# Returns: +# None +function os::test::junit::reconcile_output() { + if [[ "${NUM_OS_JUNIT_TESTS_IN_FLIGHT:-0}" = "1" ]]; then + os::test::junit::declare_test_end + fi + + for (( i = 0; i < ${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}; i++ )); do + os::test::junit::declare_suite_end + done +} +readonly -f os::test::junit::reconcile_output + +# os::test::junit::generate_report determines which type of report is to +# be generated and does so from the raw output of the tests. +# +# Globals: +# - JUNIT_REPORT_OUTPUT +# - ARTIFACT_DIR +# Arguments: +# None +# Returns: +# None +function os::test::junit::generate_report() { + if [[ -z "${JUNIT_REPORT_OUTPUT:-}" || + -n "${JUNIT_REPORT_OUTPUT:-}" && ! -s "${JUNIT_REPORT_OUTPUT:-}" ]]; then + # we can't generate a report + return 0 + fi + + if grep -q "=== END TEST CASE ===" "${JUNIT_REPORT_OUTPUT}"; then + os::test::junit::reconcile_output + os::test::junit::check_test_counters + os::test::junit::internal::generate_report "oscmd" + fi +} + +# os::test::junit::internal::generate_report generates an XML jUnit +# report for either `os::cmd` or `go test`, based on the passed +# argument. If the `junitreport` binary is not present, it will be built. 
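+# For the "oscmd" report type used above, the XML lands at a path like +# ${ARTIFACT_DIR}/oscmd_report_XXXXX.xml, with mktemp filling in the suffix.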
+# +# Globals: +# - JUNIT_REPORT_OUTPUT +# - ARTIFACT_DIR +# Arguments: +# - 1: specify which type of tests command output should junitreport read +# Returns: +# export JUNIT_REPORT_NUM_FAILED +function os::test::junit::internal::generate_report() { + local report_type="$1" + os::util::ensure::built_binary_exists 'junitreport' + + local report_file + report_file="$( mktemp "${ARTIFACT_DIR}/${report_type}_report_XXXXX" ).xml" + os::log::info "jUnit XML report placed at $( os::util::repository_relative_path "${report_file}" )" + junitreport --type "${report_type}" \ + --suites nested \ + --roots github.com/openshift/origin \ + --output "${report_file}" \ + <"${JUNIT_REPORT_OUTPUT}" + + local summary + summary=$( junitreport summarize <"${report_file}" ) + + JUNIT_REPORT_NUM_FAILED="$( grep -oE "[0-9]+ failed" <<<"${summary}" )" + export JUNIT_REPORT_NUM_FAILED + + echo "${summary}" +} diff --git a/openshift-hack/lib/util/ensure.sh b/openshift-hack/lib/util/ensure.sh new file mode 100644 index 0000000000000..158d94f984f01 --- /dev/null +++ b/openshift-hack/lib/util/ensure.sh @@ -0,0 +1,116 @@ +#!/usr/bin/env bash + +# This script contains helper functions for ensuring that dependencies +# exist on a host system that are required to run Origin scripts. + +# os::util::ensure::system_binary_exists ensures that the +# given binary exists on the system in the $PATH. +# +# Globals: +# None +# Arguments: +# - 1: binary to search for +# Returns: +# None +function os::util::ensure::system_binary_exists() { + local binary="$1" + +if ! os::util::find::system_binary "${binary}" >/dev/null 2>&1; then + os::log::fatal "Required \`${binary}\` binary was not found in \$PATH." + fi +} +readonly -f os::util::ensure::system_binary_exists + +# os::util::ensure::built_binary_exists ensures that the +# given binary exists on the system in the local output +# directory for the current platform. If it doesn't, we +# will attempt to build it if we can determine the correct +# hack/build-go.sh target for the binary. +# +# This function will attempt to determine the correct +# hack/build-go.sh target for the binary, but may not +# be able to do so if the target doesn't live under +# cmd/ or tools/. In that case, one should be given. +# +# Globals: +# - OS_ROOT +# Arguments: +# - 1: binary to search for +# - 2: optional build target for this binary +# Returns: +# None +function os::util::ensure::built_binary_exists() { + local binary="$1" + local target="${2:-}" + + if ! os::util::find::built_binary "${binary}" >/dev/null 2>&1; then + if [[ -z "${target}" ]]; then + if [[ -d "${OS_ROOT}/cmd/${binary}" ]]; then + target="cmd/${binary}" + elif [[ -d "${OS_ROOT}/tools/${binary}" ]]; then + target="tools/${binary}" + elif [[ -d "${OS_ROOT}/openshift-hack/${binary}" ]]; then + target="openshift-hack/${binary}" + fi + fi + + if [[ -n "${target}" ]]; then + os::log::info "No compiled \`${binary}\` binary was found. Attempting to build one using: + $ hack/build-go.sh ${target}" + "${OS_ROOT}/hack/build-go.sh" "${target}" + else + os::log::fatal "No compiled \`${binary}\` binary was found and no build target could be determined. +Provide the binary and try running $0 again." + fi + fi +} +readonly -f os::util::ensure::built_binary_exists + +# os::util::ensure::gopath_binary_exists ensures that the +# given binary exists on the system in $GOPATH. If it +# doesn't, we will attempt to build it if we can determine +# the correct install path for the binary. 
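+# Illustrative call (binary and import path hypothetical): +# os::util::ensure::gopath_binary_exists 'golint' 'golang.org/x/lint/golint' +# would run `go get golang.org/x/lint/golint` if golint were not already in +# $GOPATH.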
+# +# Globals: +# - GOPATH +# Arguments: +# - 1: binary to search for +# - 2: [optional] path to install from +# Returns: +# None +function os::util::ensure::gopath_binary_exists() { + local binary="$1" + local install_path="${2:-}" + + if ! os::util::find::gopath_binary "${binary}" >/dev/null 2>&1; then + if [[ -n "${install_path:-}" ]]; then + os::log::info "No installed \`${binary}\` was found in \$GOPATH. Attempting to install using: + $ go get ${install_path}" + go get "${install_path}" + else + os::log::fatal "Required \`${binary}\` binary was not found in \$GOPATH." + fi + fi +} +readonly -f os::util::ensure::gopath_binary_exists + +# os::util::ensure::iptables_privileges_exist tests if the +# testing machine has iptables available and in PATH. Also +# tests that the user can list iptables rules, trying with +# `sudo` if it fails without. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# None +function os::util::ensure::iptables_privileges_exist() { + os::util::ensure::system_binary_exists 'iptables' + + if ! iptables --list >/dev/null 2>&1 && ! sudo iptables --list >/dev/null 2>&1; then + os::log::fatal "You do not have \`iptables\` or \`sudo\` privileges. Kubernetes services will not work +without \`iptables\` access. See https://github.com/kubernetes/kubernetes/issues/1859." + fi +} +readonly -f os::util::ensure::iptables_privileges_exist diff --git a/openshift-hack/lib/util/environment.sh b/openshift-hack/lib/util/environment.sh new file mode 100644 index 0000000000000..1b0d55c7c471a --- /dev/null +++ b/openshift-hack/lib/util/environment.sh @@ -0,0 +1,296 @@ +#!/usr/bin/env bash + +# This script holds library functions for setting up the shell environment for OpenShift scripts + +# os::util::environment::use_sudo updates $USE_SUDO to be 'true', so that later scripts choosing between +# execution using 'sudo' and execution without it chose to use 'sudo' +# +# Globals: +# None +# Arguments: +# None +# Returns: +# - export USE_SUDO +function os::util::environment::use_sudo() { + USE_SUDO=true + export USE_SUDO +} +readonly -f os::util::environment::use_sudo + +# os::util::environment::setup_time_vars sets up environment variables that describe durations of time +# These variables can be used to specify times for other utility functions +# +# Globals: +# None +# Arguments: +# None +# Returns: +# - export TIME_MS +# - export TIME_SEC +# - export TIME_MIN +function os::util::environment::setup_time_vars() { + TIME_MS=1 + export TIME_MS + TIME_SEC="$(( 1000 * TIME_MS ))" + export TIME_SEC + TIME_MIN="$(( 60 * TIME_SEC ))" + export TIME_MIN +} +readonly -f os::util::environment::setup_time_vars + +# os::util::environment::setup_all_server_vars sets up all environment variables necessary to configure and start an OpenShift server +# +# Globals: +# - OS_ROOT +# - PATH +# - TMPDIR +# - LOG_DIR +# - ARTIFACT_DIR +# - KUBELET_SCHEME +# - KUBELET_BIND_HOST +# - KUBELET_HOST +# - KUBELET_PORT +# - BASETMPDIR +# - ETCD_PORT +# - ETCD_PEER_PORT +# - API_BIND_HOST +# - API_HOST +# - API_PORT +# - API_SCHEME +# - PUBLIC_MASTER_HOST +# - USE_IMAGES +# Arguments: +# - 1: the path under the root temporary directory for OpenShift where these subdirectories should be made +# Returns: +# - export PATH +# - export BASETMPDIR +# - export LOG_DIR +# - export VOLUME_DIR +# - export ARTIFACT_DIR +# - export FAKE_HOME_DIR +# - export KUBELET_SCHEME +# - export KUBELET_BIND_HOST +# - export KUBELET_HOST +# - export KUBELET_PORT +# - export ETCD_PORT +# - export ETCD_PEER_PORT +# - export 
ETCD_DATA_DIR +# - export API_BIND_HOST +# - export API_HOST +# - export API_PORT +# - export API_SCHEME +# - export SERVER_CONFIG_DIR +# - export MASTER_CONFIG_DIR +# - export NODE_CONFIG_DIR +# - export USE_IMAGES +# - export TAG +function os::util::environment::setup_all_server_vars() { + os::util::environment::setup_kubelet_vars + os::util::environment::setup_etcd_vars + os::util::environment::setup_server_vars + os::util::environment::setup_images_vars +} +readonly -f os::util::environment::setup_all_server_vars + +# os::util::environment::update_path_var updates $PATH so that OpenShift binaries are available +# +# Globals: +# - OS_ROOT +# - PATH +# Arguments: +# None +# Returns: +# - export PATH +function os::util::environment::update_path_var() { + local prefix + if os::util::find::system_binary 'go' >/dev/null 2>&1; then + prefix+="${OS_OUTPUT_BINPATH}/$(os::build::host_platform):" + fi + if [[ -n "${GOPATH:-}" ]]; then + prefix+="${GOPATH}/bin:" + fi + + PATH="${prefix:-}${PATH}" + export PATH +} +readonly -f os::util::environment::update_path_var + +# os::util::environment::setup_tmpdir_vars sets up temporary directory path variables +# +# Globals: +# - TMPDIR +# Arguments: +# - 1: the path under the root temporary directory for OpenShift where these subdirectories should be made +# Returns: +# - export BASETMPDIR +# - export BASEOUTDIR +# - export LOG_DIR +# - export VOLUME_DIR +# - export ARTIFACT_DIR +# - export FAKE_HOME_DIR +# - export OS_TMP_ENV_SET +function os::util::environment::setup_tmpdir_vars() { + local sub_dir=$1 + + BASETMPDIR="${TMPDIR:-/tmp}/openshift/${sub_dir}" + export BASETMPDIR + VOLUME_DIR="${BASETMPDIR}/volumes" + export VOLUME_DIR + + BASEOUTDIR="${OS_OUTPUT_SCRIPTPATH}/${sub_dir}" + export BASEOUTDIR + LOG_DIR="${ARTIFACT_DIR:-${BASEOUTDIR}}/logs" + export LOG_DIR + ARTIFACT_DIR="${ARTIFACT_DIR:-${BASEOUTDIR}/artifacts}" + export ARTIFACT_DIR + FAKE_HOME_DIR="${BASEOUTDIR}/openshift.local.home" + export FAKE_HOME_DIR + + mkdir -p "${LOG_DIR}" "${VOLUME_DIR}" "${ARTIFACT_DIR}" "${FAKE_HOME_DIR}" + + export OS_TMP_ENV_SET="${sub_dir}" +} +readonly -f os::util::environment::setup_tmpdir_vars + +# os::util::environment::setup_kubelet_vars sets up environment variables necessary for interacting with the kubelet +# +# Globals: +# - KUBELET_SCHEME +# - KUBELET_BIND_HOST +# - KUBELET_HOST +# - KUBELET_PORT +# Arguments: +# None +# Returns: +# - export KUBELET_SCHEME +# - export KUBELET_BIND_HOST +# - export KUBELET_HOST +# - export KUBELET_PORT +function os::util::environment::setup_kubelet_vars() { + KUBELET_SCHEME="${KUBELET_SCHEME:-https}" + export KUBELET_SCHEME + KUBELET_BIND_HOST="${KUBELET_BIND_HOST:-127.0.0.1}" + export KUBELET_BIND_HOST + KUBELET_HOST="${KUBELET_HOST:-${KUBELET_BIND_HOST}}" + export KUBELET_HOST + KUBELET_PORT="${KUBELET_PORT:-10250}" + export KUBELET_PORT +} +readonly -f os::util::environment::setup_kubelet_vars + +# os::util::environment::setup_etcd_vars sets up environment variables necessary for interacting with etcd +# +# Globals: +# - BASETMPDIR +# - ETCD_HOST +# - ETCD_PORT +# - ETCD_PEER_PORT +# Arguments: +# None +# Returns: +# - export ETCD_HOST +# - export ETCD_PORT +# - export ETCD_PEER_PORT +# - export ETCD_DATA_DIR +function os::util::environment::setup_etcd_vars() { + ETCD_HOST="${ETCD_HOST:-127.0.0.1}" + export ETCD_HOST + ETCD_PORT="${ETCD_PORT:-4001}" + export ETCD_PORT + ETCD_PEER_PORT="${ETCD_PEER_PORT:-7001}" + export ETCD_PEER_PORT + + ETCD_DATA_DIR="${BASETMPDIR}/etcd" + export ETCD_DATA_DIR + + mkdir -p 
"${ETCD_DATA_DIR}" +} +readonly -f os::util::environment::setup_etcd_vars + +# os::util::environment::setup_server_vars sets up environment variables necessary for interacting with the server +# +# Globals: +# - BASETMPDIR +# - KUBELET_HOST +# - API_BIND_HOST +# - API_HOST +# - API_PORT +# - API_SCHEME +# - PUBLIC_MASTER_HOST +# Arguments: +# None +# Returns: +# - export API_BIND_HOST +# - export API_HOST +# - export API_PORT +# - export API_SCHEME +# - export SERVER_CONFIG_DIR +# - export MASTER_CONFIG_DIR +# - export NODE_CONFIG_DIR +function os::util::environment::setup_server_vars() { + # turn on cache mutation detector every time we start a server + KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}" + export KUBE_CACHE_MUTATION_DETECTOR + + API_BIND_HOST="${API_BIND_HOST:-127.0.0.1}" + export API_BIND_HOST + API_HOST="${API_HOST:-${API_BIND_HOST}}" + export API_HOST + API_PORT="${API_PORT:-8443}" + export API_PORT + API_SCHEME="${API_SCHEME:-https}" + export API_SCHEME + + MASTER_ADDR="${API_SCHEME}://${API_HOST}:${API_PORT}" + export MASTER_ADDR + PUBLIC_MASTER_HOST="${PUBLIC_MASTER_HOST:-${API_HOST}}" + export PUBLIC_MASTER_HOST + + SERVER_CONFIG_DIR="${BASETMPDIR}/openshift.local.config" + export SERVER_CONFIG_DIR + MASTER_CONFIG_DIR="${SERVER_CONFIG_DIR}/master" + export MASTER_CONFIG_DIR + NODE_CONFIG_DIR="${SERVER_CONFIG_DIR}/node-${KUBELET_HOST}" + export NODE_CONFIG_DIR + + ETCD_CLIENT_CERT="${MASTER_CONFIG_DIR}/master.etcd-client.crt" + export ETCD_CLIENT_CERT + ETCD_CLIENT_KEY="${MASTER_CONFIG_DIR}/master.etcd-client.key" + export ETCD_CLIENT_KEY + ETCD_CA_BUNDLE="${MASTER_CONFIG_DIR}/ca-bundle.crt" + export ETCD_CA_BUNDLE + + mkdir -p "${SERVER_CONFIG_DIR}" "${MASTER_CONFIG_DIR}" "${NODE_CONFIG_DIR}" +} +readonly -f os::util::environment::setup_server_vars + +# os::util::environment::setup_images_vars sets up environment variables necessary for interacting with release images +# +# Globals: +# - OS_ROOT +# - USE_IMAGES +# Arguments: +# None +# Returns: +# - export USE_IMAGES +# - export TAG +# - export MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY +function os::util::environment::setup_images_vars() { + # Use either the latest release built images, or latest. + IMAGE_PREFIX="${OS_IMAGE_PREFIX:-"openshift/origin"}" + if [[ -z "${USE_IMAGES-}" ]]; then + TAG='latest' + export TAG + USE_IMAGES="${IMAGE_PREFIX}-\${component}:latest" + export USE_IMAGES + + if [[ -e "${OS_ROOT}/_output/local/releases/.commit" ]]; then + TAG="$(cat "${OS_ROOT}/_output/local/releases/.commit")" + export TAG + USE_IMAGES="${IMAGE_PREFIX}-\${component}:${TAG}" + export USE_IMAGES + fi + fi + export MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY="${MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY:-3}" +} +readonly -f os::util::environment::setup_images_vars diff --git a/openshift-hack/lib/util/find.sh b/openshift-hack/lib/util/find.sh new file mode 100644 index 0000000000000..4ca12d040f9b3 --- /dev/null +++ b/openshift-hack/lib/util/find.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +# This script contains helper functions for finding components +# in the Origin repository or on the host machine running scripts. + +# os::util::find::system_binary determines the absolute path to a +# system binary, if it exists. 
+# +# Globals: +# None +# Arguments: +# - 1: binary name +# Returns: +# - location of the binary +function os::util::find::system_binary() { + local binary_name="$1" + + command -v "${binary_name}" +} +readonly -f os::util::find::system_binary + +# os::util::find::built_binary determines the absolute path to a +# built binary for the current platform, if it exists. +# +# Globals: +# - OS_OUTPUT_BINPATH +# Arguments: +# - 1: binary name +# Returns: +# - location of the binary +function os::util::find::built_binary() { + local binary_name="$1" + + local binary_path; binary_path="${OS_OUTPUT_BINPATH}/$( os::build::host_platform )/${binary_name}" + # we need to check that the path leads to a file + # as directories also have the executable bit set + if [[ -f "${binary_path}" && -x "${binary_path}" ]]; then + echo "${binary_path}" + return 0 + else + return 1 + fi +} +readonly -f os::util::find::built_binary + +# os::util::find::gopath_binary determines the absolute path to a +# binary installed through the go toolchain, if it exists. +# +# Globals: +# - GOPATH +# Arguments: +# - 1: binary name +# Returns: +# - location of the binary +function os::util::find::gopath_binary() { + local binary_name="$1" + + local old_ifs="${IFS}" + IFS=":" + for part in ${GOPATH}; do + local binary_path="${part}/bin/${binary_name}" + # we need to check that the path leads to a file + # as directories also have the executable bit set + if [[ -f "${binary_path}" && -x "${binary_path}" ]]; then + echo "${binary_path}" + IFS="${old_ifs}" + return 0 + fi + done + IFS="${old_ifs}" + return 1 +} +readonly -f os::util::find::gopath_binary \ No newline at end of file diff --git a/openshift-hack/lib/util/misc.sh b/openshift-hack/lib/util/misc.sh new file mode 100644 index 0000000000000..69ea27dc43e2a --- /dev/null +++ b/openshift-hack/lib/util/misc.sh @@ -0,0 +1,224 @@ +#!/usr/bin/env bash +# +# This library holds miscellaneous utility functions. If there begin to be groups of functions in this +# file that share intent or are thematically similar, they should be split into their own files. + +# os::util::describe_return_code describes an exit code +# +# Globals: +# - OS_SCRIPT_START_TIME +# Arguments: +# - 1: exit code to describe +# Returns: +# None +function os::util::describe_return_code() { + local return_code=$1 + local message + message="$( os::util::repository_relative_path "$0" ) exited with code ${return_code} " + + if [[ -n "${OS_SCRIPT_START_TIME:-}" ]]; then + local end_time + end_time="$(date +%s)" + local elapsed_time + elapsed_time="$(( end_time - OS_SCRIPT_START_TIME ))" + local formatted_time + formatted_time="$( os::util::format_seconds "${elapsed_time}" )" + message+="after ${formatted_time}" + fi + + if [[ "${return_code}" = "0" ]]; then + os::log::info "${message}" + else + os::log::error "${message}" + fi +} +readonly -f os::util::describe_return_code + +# os::util::install_describe_return_code installs the return code describer for the EXIT trap +# If the EXIT trap is not initialized, installing this plugin will initialize it. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# - export OS_DESCRIBE_RETURN_CODE +# - export OS_SCRIPT_START_TIME +function os::util::install_describe_return_code() { + export OS_DESCRIBE_RETURN_CODE="true" + OS_SCRIPT_START_TIME="$( date +%s )"; export OS_SCRIPT_START_TIME + os::util::trap::init_exit +} +readonly -f os::util::install_describe_return_code + +# OS_ORIGINAL_WD is the original working directory the script sourcing this utility file was called +# from. 
This is an important directory as if $0 is a relative path, we cannot use the following path +# utility without knowing from where $0 is relative. +if [[ -z "${OS_ORIGINAL_WD:-}" ]]; then + # since this could be sourced in a context where the utilities are already loaded, + # we want to ensure that this is re-entrant, so we only set $OS_ORIGINAL_WD if it + # is not set already + OS_ORIGINAL_WD="$( pwd )" + readonly OS_ORIGINAL_WD + export OS_ORIGINAL_WD +fi + +# os::util::repository_relative_path returns the relative path from the $OS_ROOT directory to the +# given file, if the file is inside of the $OS_ROOT directory. If the file is outside of $OS_ROOT, +# this function will return the absolute path to the file +# +# Globals: +# - OS_ROOT +# Arguments: +# - 1: the path to relativize +# Returns: +# None +function os::util::repository_relative_path() { + local filename=$1 + local directory; directory="$( dirname "${filename}" )" + filename="$( basename "${filename}" )" + + if [[ "${directory}" != "${OS_ROOT}"* ]]; then + pushd "${OS_ORIGINAL_WD}" >/dev/null 2>&1 || exit 1 + directory="$( os::util::absolute_path "${directory}" )" + popd >/dev/null 2>&1 || exit 1 + fi + + directory="${directory##*${OS_ROOT}/}" + + echo "${directory}/${filename}" +} +readonly -f os::util::repository_relative_path + +# os::util::format_seconds formats a duration of time in seconds to print in HHh MMm SSs +# +# Globals: +# None +# Arguments: +# - 1: time in seconds to format +# Return: +# None +function os::util::format_seconds() { + local raw_seconds=$1 + + local hours minutes seconds + (( hours=raw_seconds/3600 )) + (( minutes=(raw_seconds%3600)/60 )) + (( seconds=raw_seconds%60 )) + + printf '%02dh %02dm %02ds' "${hours}" "${minutes}" "${seconds}" +} +readonly -f os::util::format_seconds + +# os::util::sed attempts to make our Bash scripts agnostic to the platform +# on which they run `sed` by glossing over a discrepancy in flag use in GNU. +# +# Globals: +# None +# Arguments: +# - all: arguments to pass to `sed -i` +# Return: +# None +function os::util::sed() { + local sudo="${USE_SUDO:+sudo}" + if LANG=C sed --help 2>&1 | grep -q "GNU sed"; then + ${sudo} sed -i'' "$@" + else + ${sudo} sed -i '' "$@" + fi +} +readonly -f os::util::sed + +# os::util::base64decode attempts to make our Bash scripts agnostic to the platform +# on which they run `base64decode` by glossing over a discrepancy in flag use in GNU. +# +# Globals: +# None +# Arguments: +# - all: arguments to pass to `base64decode` +# Return: +# None +function os::util::base64decode() { + if [[ "$(go env GOHOSTOS)" == "darwin" ]]; then + base64 -D "$@" + else + base64 -d "$@" + fi +} +readonly -f os::util::base64decode + +# os::util::curl_etcd sends a request to the backing etcd store for the master. +# We use the administrative client cert and key for access and re-encode them +# as necessary for OSX clients. 
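A sketch of a call, assuming the server environment variables listed below are populated (the key path is illustrative and assumes the etcd v2 keys API):

    os::util::curl_etcd "/v2/keys/kubernetes.io/namespaces/default"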
+# +# Globals: +# MASTER_CONFIG_DIR +# API_SCHEME +# API_HOST +# ETCD_PORT +# Arguments: +# - 1: etcd-relative URL to curl, with leading slash +# Returns: +# None +function os::util::curl_etcd() { + local url="$1" + local full_url="${API_SCHEME}://${API_HOST}:${ETCD_PORT}${url}" + + local etcd_client_cert="${MASTER_CONFIG_DIR}/master.etcd-client.crt" + local etcd_client_key="${MASTER_CONFIG_DIR}/master.etcd-client.key" + local ca_bundle="${MASTER_CONFIG_DIR}/ca-bundle.crt" + + if curl -V | grep -q 'SecureTransport'; then + # on newer OSX `curl` implementations, SSL is not used and client certs + # and keys are expected to be encoded in P12 format instead of PEM format, + # so we need to convert the secrets that the server wrote if we haven't + # already done so + local etcd_client_cert_p12="${MASTER_CONFIG_DIR}/master.etcd-client.crt.p12" + local etcd_client_cert_p12_password="${CURL_CERT_P12_PASSWORD:-'password'}" + if [[ ! -f "${etcd_client_cert_p12}" ]]; then + openssl pkcs12 -export \ + -in "${etcd_client_cert}" \ + -inkey "${etcd_client_key}" \ + -out "${etcd_client_cert_p12}" \ + -password "pass:${etcd_client_cert_p12_password}" + fi + + curl --fail --silent --cacert "${ca_bundle}" \ + --cert "${etcd_client_cert_p12}:${etcd_client_cert_p12_password}" "${full_url}" + else + curl --fail --silent --cacert "${ca_bundle}" \ + --cert "${etcd_client_cert}" --key "${etcd_client_key}" "${full_url}" + fi +} + +# os::util::ensure_tmpfs ensures that the target dir is mounted on tmpfs +# +# Globals: +# OS_TMPFS_REQUIRED +# Arguments: +# - 1: target to check +# Returns: +# None +function os::util::ensure_tmpfs() { + if [[ -z "${OS_TMPFS_REQUIRED:-}" ]]; then + return 0 + fi + + local target="$1" + if [[ ! -d "${target}" ]]; then + os::log::fatal "Target dir ${target} does not exist, cannot perform fstype check." + fi + + os::log::debug "Filesystem information: +$( df -h -T )" + + os::log::debug "Mount information: +$( findmnt --all )" + + local fstype + fstype="$( df --output=fstype "${target}" | tail -n 1 )" + if [[ "${fstype}" != "tmpfs" ]]; then + local message="Expected \`${target}\` to be mounted on \`tmpfs\` but found \`${fstype}\` instead." + os::log::fatal "${message}" + fi +} diff --git a/openshift-hack/lib/util/text.sh b/openshift-hack/lib/util/text.sh new file mode 100644 index 0000000000000..708a47251cb20 --- /dev/null +++ b/openshift-hack/lib/util/text.sh @@ -0,0 +1,164 @@ +#!/usr/bin/env bash + +# This file contains helpful aliases for manipulating the output text to the terminal as +# well as functions for one-command augmented printing. 
+ +# os::text::reset resets the terminal output to default if it is called in a TTY +function os::text::reset() { + if os::text::internal::is_tty; then + tput sgr0 + fi +} +readonly -f os::text::reset + +# os::text::bold sets the terminal output to bold text if it is called in a TTY +function os::text::bold() { + if os::text::internal::is_tty; then + tput bold + fi +} +readonly -f os::text::bold + +# os::text::red sets the terminal output to red text if it is called in a TTY +function os::text::red() { + if os::text::internal::is_tty; then + tput setaf 1 + fi +} +readonly -f os::text::red + +# os::text::green sets the terminal output to green text if it is called in a TTY +function os::text::green() { + if os::text::internal::is_tty; then + tput setaf 2 + fi +} +readonly -f os::text::green + +# os::text::blue sets the terminal output to blue text if it is called in a TTY +function os::text::blue() { + if os::text::internal::is_tty; then + tput setaf 4 + fi +} +readonly -f os::text::blue + +# os::text::yellow sets the terminal output to yellow text if it is called in a TTY +function os::text::yellow() { + if os::text::internal::is_tty; then + tput setaf 11 + fi +} +readonly -f os::text::yellow + +# os::text::clear_last_line clears the text from the last line of output to the +# terminal and leaves the cursor on that line to allow for overwriting that text +# if it is called in a TTY +function os::text::clear_last_line() { + if os::text::internal::is_tty; then + tput cuu 1 + tput el + fi +} +readonly -f os::text::clear_last_line + +# os::text::clear_string attempts to clear the entirety of a string from the terminal. +# If the string contains literal tabs or other characters that take up more than one +# character space in output, or if the window size is changed before this function +# is called, it will not function correctly. 
+# No action is taken if this is called outside of a TTY +function os::text::clear_string() { + local -r string="$1" + if os::text::internal::is_tty; then + echo "${string}" | while read -r line; do + # num_lines is the number of terminal lines this one line of output + # would have taken up with the current terminal width in columns + local num_lines=$(( ${#line} / $( tput cols ) )) + for (( i = 0; i <= num_lines; i++ )); do + os::text::clear_last_line + done + done + fi +} + +# os::text::internal::is_tty determines if we are outputting to a TTY +function os::text::internal::is_tty() { + [[ -t 1 && -n "${TERM:-}" ]] +} +readonly -f os::text::internal::is_tty + +# os::text::print_bold prints all input in bold text +function os::text::print_bold() { + os::text::bold + echo "${*}" + os::text::reset +} +readonly -f os::text::print_bold + +# os::text::print_red prints all input in red text +function os::text::print_red() { + os::text::red + echo "${*}" + os::text::reset +} +readonly -f os::text::print_red + +# os::text::print_red_bold prints all input in bold red text +function os::text::print_red_bold() { + os::text::red + os::text::bold + echo "${*}" + os::text::reset +} +readonly -f os::text::print_red_bold + +# os::text::print_green prints all input in green text +function os::text::print_green() { + os::text::green + echo "${*}" + os::text::reset +} +readonly -f os::text::print_green + +# os::text::print_green_bold prints all input in bold green text +function os::text::print_green_bold() { + os::text::green + os::text::bold + echo "${*}" + os::text::reset +} +readonly -f os::text::print_green_bold + +# os::text::print_blue prints all input in blue text +function os::text::print_blue() { + os::text::blue + echo "${*}" + os::text::reset +} +readonly -f os::text::print_blue + +# os::text::print_blue_bold prints all input in bold blue text +function os::text::print_blue_bold() { + os::text::blue + os::text::bold + echo "${*}" + os::text::reset +} +readonly -f os::text::print_blue_bold + +# os::text::print_yellow prints all input in yellow text +function os::text::print_yellow() { + os::text::yellow + echo "${*}" + os::text::reset +} +readonly -f os::text::print_yellow + +# os::text::print_yellow_bold prints all input in bold yellow text +function os::text::print_yellow_bold() { + os::text::yellow + os::text::bold + echo "${*}" + os::text::reset +} +readonly -f os::text::print_yellow_bold diff --git a/openshift-hack/lib/util/trap.sh b/openshift-hack/lib/util/trap.sh new file mode 100644 index 0000000000000..f76d6bfe404d5 --- /dev/null +++ b/openshift-hack/lib/util/trap.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash +# +# This library defines the trap handlers for the ERR and EXIT signals. Any new handler for these signals +# must be added to these handlers and activated by the environment variable mechanism that the rest use. +# These functions ensure that no handler can ever alter the exit code that was emitted by a command +# in a test script. + +# os::util::trap::init_err initializes the privileged handler for the ERR signal if it hasn't +# been registered already. This will overwrite any other handlers registered on the signal. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# None +function os::util::trap::init_err() { + if ! 
trap -p ERR | grep -q 'os::util::trap::err_handler'; then + trap 'os::util::trap::err_handler;' ERR + fi +} +readonly -f os::util::trap::init_err + +# os::util::trap::init_exit initializes the privileged handler for the EXIT signal if it hasn't +# been registered already. This will overwrite any other handlers registered on the signal. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# None +function os::util::trap::init_exit() { + if ! trap -p EXIT | grep -q 'os::util::trap::exit_handler'; then + trap 'os::util::trap::exit_handler;' EXIT + fi +} +readonly -f os::util::trap::init_exit + +# os::util::trap::err_handler is the handler for the ERR signal. +# +# Globals: +# - OS_TRAP_DEBUG +# - OS_USE_STACKTRACE +# Arguments: +# None +# Returns: +# - returns original return code, allows privileged handler to exit if necessary +function os::util::trap::err_handler() { + local -r return_code=$? + local -r last_command="${BASH_COMMAND}" + + if set +o | grep -q '\-o errexit'; then + local -r errexit_set=true + fi + + if [[ "${OS_TRAP_DEBUG:-}" = "true" ]]; then + echo "[DEBUG] Error handler executing with return code \`${return_code}\`, last command \`${last_command}\`, and errexit set \`${errexit_set:-}\`" + fi + + if [[ "${OS_USE_STACKTRACE:-}" = "true" ]]; then + # the OpenShift stacktrace function is treated as a privileged handler for this signal + # and is therefore allowed to run outside of a subshell in order to allow it to `exit` + # if necessary + os::log::stacktrace::print "${return_code}" "${last_command}" "${errexit_set:-}" + fi + + return "${return_code}" +} +readonly -f os::util::trap::err_handler + +# os::util::trap::exit_handler is the handler for the EXIT signal. +# +# Globals: +# - OS_TRAP_DEBUG +# - OS_DESCRIBE_RETURN_CODE +# Arguments: +# None +# Returns: +# - original exit code of the script that exited +function os::util::trap::exit_handler() { + local -r return_code=$? + + # we do not want these traps to be able to trigger more errors, we can let them fail silently + set +o errexit + + if [[ "${OS_TRAP_DEBUG:-}" = "true" ]]; then + echo "[DEBUG] Exit handler executing with return code \`${return_code}\`" + fi + + # the following envars selectively enable optional exit traps, all of which are run inside of + # a subshell in order to sandbox them and not allow them to influence how this script will exit + if [[ "${OS_DESCRIBE_RETURN_CODE:-}" = "true" ]]; then + ( os::util::describe_return_code "${return_code}" ) + fi + + exit "${return_code}" +} +readonly -f os::util::trap::exit_handler diff --git a/openshift-hack/rebase.sh b/openshift-hack/rebase.sh new file mode 100755 index 0000000000000..70ea50b38baa9 --- /dev/null +++ b/openshift-hack/rebase.sh @@ -0,0 +1,175 @@ +#!/bin/bash + +# READ FIRST BEFORE USING THIS SCRIPT +# +# This script requires jq, git, podman and bash to work properly (dependencies are checked for you). +# The Github CLI "gh" is optional, but convenient to create a pull request automatically at the end. +# +# This script generates a git remote structure described in: +# https://github.com/openshift/kubernetes/blob/master/REBASE.openshift.md#preparing-the-local-repo-clone +# Please check if you have configured the correct remotes, otherwise the script will fail. +# +# The usage is described in /Rebase.openshift.md. 
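An example invocation, using the sample values from the argument parsing below:

    openshift-hack/rebase.sh --k8s-tag=v1.21.2 --openshift-release=release-4.8 --bugzilla-id=2003027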
+
+# validate input args --k8s-tag=v1.21.2 --openshift-release=release-4.8 --bugzilla-id=2003027
+k8s_tag=""
+openshift_release=""
+bugzilla_id=""
+
+usage() {
+  echo "Available arguments:"
+  echo "  --k8s-tag            (required) Example: --k8s-tag=v1.21.2"
+  echo "  --openshift-release  (required) Example: --openshift-release=release-4.8"
+  echo "  --bugzilla-id        (optional) creates a new PR against openshift/kubernetes:${openshift_release}. Example: --bugzilla-id=2003027"
+}
+
+for i in "$@"; do
+  case $i in
+  --k8s-tag=*)
+    k8s_tag="${i#*=}"
+    shift
+    ;;
+  --openshift-release=*)
+    openshift_release="${i#*=}"
+    shift
+    ;;
+  --bugzilla-id=*)
+    bugzilla_id="${i#*=}"
+    shift
+    ;;
+  *)
+    usage
+    exit 1
+    ;;
+  esac
+done
+
+if [ -z "${k8s_tag}" ]; then
+  echo "Required argument missing: --k8s-tag"
+  echo ""
+  usage
+  exit 1
+fi
+
+if [ -z "${openshift_release}" ]; then
+  echo "Required argument missing: --openshift-release"
+  echo ""
+  usage
+  exit 1
+fi
+
+echo "Processed arguments are:"
+echo "--k8s_tag=${k8s_tag}"
+echo "--openshift_release=${openshift_release}"
+echo "--bugzilla_id=${bugzilla_id}"
+
+# prerequisites (check that git, jq and podman are present)
+if ! command -v git &>/dev/null; then
+  echo "git not installed, exiting"
+  exit 1
+fi
+
+if ! command -v jq &>/dev/null; then
+  echo "jq not installed, exiting"
+  exit 1
+fi
+
+if ! command -v podman &>/dev/null; then
+  echo "podman not installed, exiting"
+  exit 1
+fi
+
+# make sure we're in the "kubernetes" dir
+if [[ $(basename "$PWD") != "kubernetes" ]]; then
+  echo "Not in kubernetes dir, exiting"
+  exit 1
+fi
+
+origin=$(git remote get-url origin)
+if [[ "$origin" =~ .*kubernetes/kubernetes.* || "$origin" =~ .*openshift/kubernetes.* ]]; then
+  echo "cannot rebase against k/k or o/k! found: ${origin}, exiting"
+  exit 1
+fi
+
+# fetch remote https://github.com/kubernetes/kubernetes
+git remote add upstream git@github.com:kubernetes/kubernetes.git
+git fetch upstream --tags -f
+# fetch remote https://github.com/openshift/kubernetes
+git remote add openshift git@github.com:openshift/kubernetes.git
+git fetch openshift
+
+#git checkout --track "openshift/$openshift_release"
+git pull openshift "$openshift_release"
+
+git merge "$k8s_tag"
+# shellcheck disable=SC2181
+if [ $? -eq 0 ]; then
+  echo "No conflicts detected. Automatic merge looks to have succeeded"
+else
+  # commit conflicts
+  git commit -a
+  # resolve conflicts
+  git status
+  # TODO(tjungblu): we follow up with a more automated approach:
+  # - 2/3s of conflicts stem from go.mod/sum, which can be resolved deterministically
+  # - the large majority of the remainder are vendor/generation conflicts
+  # - only very few cases require manual intervention due to conflicting business logic
+  echo "Resolve conflicts manually in another terminal, only then continue"
+
+  # wait for user interaction
+  read -n 1 -s -r -p "PRESS ANY KEY TO CONTINUE"
+
+  # TODO(tjungblu): verify that the conflicts have been resolved
+  git commit -am "UPSTREAM: <carry>: manually resolve conflicts"
+fi
+
+# openshift-hack/images/hyperkube/Dockerfile.rhel still has FROM pointing to the old tag;
+# we need to remove the prefix "v" from the $k8s_tag to stay compatible
+sed -i -E "s/(io.openshift.build.versions=\"kubernetes=)(1.[1-9]+.[1-9]+)/\1${k8s_tag:1}/" openshift-hack/images/hyperkube/Dockerfile.rhel
+go_mod_go_ver=$(grep -E 'go 1\.[1-9][0-9]?' go.mod | sed -E 's/go (1\.[1-9][0-9]?)/\1/')
+tag="rhel-8-release-golang-${go_mod_go_ver}-openshift-${openshift_release#release-}"
+
+# update openshift go.mod dependencies
+sed -i -E "/=>/! s/(\tgithub.com\/openshift\/[a-z|-]+) (.*)$/\1 $openshift_release/" go.mod
+
+echo "> go mod tidy && hack/update-vendor.sh"
+# run both steps inside the release image so they use its Go toolchain
+podman run -it --rm -v "$(pwd):/go/k8s.io/kubernetes:Z" \
+  --workdir=/go/k8s.io/kubernetes \
+  "registry.ci.openshift.org/openshift/release:$tag" \
+  /bin/bash -c 'go mod tidy && hack/update-vendor.sh'
+
+# shellcheck disable=SC2181
+if [ $? -ne 0 ]; then
+  echo "updating the vendor folder failed, is any dependency missing?"
+  exit 1
+fi
+
+podman run -it --rm -v "$(pwd):/go/k8s.io/kubernetes:Z" \
+  --workdir=/go/k8s.io/kubernetes \
+  "registry.ci.openshift.org/openshift/release:$tag" \
+  make update OS_RUN_WITHOUT_DOCKER=yes
+
+git add -A
+git commit -m "UPSTREAM: <drop>: hack/update-vendor.sh, make update and update image"
+
+remote_branch="rebase-$k8s_tag"
+git push origin "$openshift_release:$remote_branch"
+
+if [ -n "${bugzilla_id}" ]; then
+  if command -v gh &>/dev/null; then
+    XY=$(echo "$k8s_tag" | sed -E "s/v(1\.[0-9]+)\.[0-9]+/\1/")
+    ver=$(echo "$k8s_tag" | sed "s/\.//g")
+    link="https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-$XY.md#$ver"
+
+    # opens a web browser, because we can't properly create PRs against remote repositories with the GH CLI (yet):
+    # https://github.com/cli/cli/issues/2691
+    gh pr create \
+      --title "Bug $bugzilla_id: Rebase $k8s_tag" \
+      --body "CHANGELOG $link" \
+      --web
+  fi
+fi
diff --git a/openshift-hack/sysctls/50-kubelet.conf b/openshift-hack/sysctls/50-kubelet.conf
new file mode 100644
index 0000000000000..3a4d5a7b1af63
--- /dev/null
+++ b/openshift-hack/sysctls/50-kubelet.conf
@@ -0,0 +1,6 @@
+kernel.keys.root_maxbytes=25000000
+kernel.keys.root_maxkeys=1000000
+kernel.panic=10
+kernel.panic_on_oops=1
+vm.overcommit_memory=1
+vm.panic_on_oom=0
diff --git a/openshift-hack/test-go.sh b/openshift-hack/test-go.sh
new file mode 100755
index 0000000000000..30793e2b082df
--- /dev/null
+++ b/openshift-hack/test-go.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+# shellcheck source=openshift-hack/lib/init.sh
+source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh"
+
+ARTIFACTS="${ARTIFACTS:-/tmp/artifacts}"
+mkdir -p "${ARTIFACTS}"
+
+export KUBERNETES_SERVICE_HOST=
+export KUBE_JUNIT_REPORT_DIR="${ARTIFACTS}"
+export KUBE_KEEP_VERBOSE_TEST_OUTPUT=y
+export KUBE_RACE=-race
+export KUBE_TEST_ARGS='-p 8'
+export KUBE_TIMEOUT='--timeout=360s'
+
+make test
diff --git a/openshift-hack/test-integration.sh b/openshift-hack/test-integration.sh
new file mode 100755
index 0000000000000..93c3ea902b099
--- /dev/null
+++ b/openshift-hack/test-integration.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+# shellcheck source=openshift-hack/lib/init.sh
+source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh"
+
+./hack/install-etcd.sh
+PATH="${OS_ROOT}/third_party/etcd:${PATH}"
+
+ARTIFACTS="${ARTIFACTS:-/tmp/artifacts}"
+mkdir -p "${ARTIFACTS}"
+
+export KUBERNETES_SERVICE_HOST=
+export KUBE_JUNIT_REPORT_DIR="${ARTIFACTS}"
+export KUBE_KEEP_VERBOSE_TEST_OUTPUT=y
+export KUBE_RACE=-race
+export KUBE_TEST_ARGS='-p 8'
+export LOG_LEVEL=4
+export PATH
+
+make test-integration
diff --git a/openshift-hack/test-kubernetes-e2e.sh b/openshift-hack/test-kubernetes-e2e.sh
new file mode 100755
index 0000000000000..ea005aee55aef
--- /dev/null
+++ b/openshift-hack/test-kubernetes-e2e.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+set -o nounset
+set -o errexit
+set -o pipefail
+
+# This script executes kubernetes e2e tests against an openshift
+# cluster. It is intended to be copied to the kubernetes-tests image
+# for use in CI and should have no dependencies beyond oc, kubectl and
+# k8s-e2e.test.
+
+# Identify the platform under test to allow skipping tests that are
+# not compatible.
+CLUSTER_TYPE="${CLUSTER_TYPE:-gcp}"
+case "${CLUSTER_TYPE}" in
+  gcp)
+    # gce is used as a platform label instead of gcp
+    PLATFORM=gce
+    ;;
+  *)
+    PLATFORM="${CLUSTER_TYPE}"
+    ;;
+esac
+
+# openshift-tests will check the cluster's network configuration and
+# automatically skip any incompatible tests. We have to do that manually
+# here.
+NETWORK_SKIPS="\[Skipped:Network/OVNKubernetes\]|\[Feature:Networking-IPv6\]|\[Feature:IPv6DualStack.*\]|\[Feature:SCTPConnectivity\]"
+
+# Support serial and parallel test suites
+TEST_SUITE="${TEST_SUITE:-parallel}"
+COMMON_SKIPS="\[Slow\]|\[Disruptive\]|\[Flaky\]|\[Disabled:.+\]|\[Skipped:${PLATFORM}\]|${NETWORK_SKIPS}"
+case "${TEST_SUITE}" in
+serial)
+  DEFAULT_TEST_ARGS="-focus=\[Serial\] -skip=${COMMON_SKIPS}"
+  NODES=1
+  ;;
+parallel)
+  DEFAULT_TEST_ARGS="-skip=\[Serial\]|${COMMON_SKIPS}"
+  # Use the same number of nodes - 30 - as specified for the parallel
+  # suite defined in origin.
+  NODES=${NODES:-30}
+  ;;
+*)
+  echo >&2 "Unsupported test suite '${TEST_SUITE}'"
+  exit 1
+  ;;
+esac
+
+# Set KUBE_E2E_TEST_ARGS to configure test arguments like
+# -skip and -focus.
+KUBE_E2E_TEST_ARGS="${KUBE_E2E_TEST_ARGS:-${DEFAULT_TEST_ARGS}}"
+
+# k8s-e2e.test and ginkgo are expected to be in the path in
+# CI. Outside of CI, ensure k8s-e2e.test and ginkgo are built and
+# available in PATH.
+if ! which k8s-e2e.test &> /dev/null; then
+  make WHAT=vendor/github.com/onsi/ginkgo/v2/ginkgo
+  make WHAT=openshift-hack/e2e/k8s-e2e.test
+  ROOT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")/.."; pwd -P)"
+  PATH="${ROOT_PATH}/_output/local/bin/$(go env GOHOSTOS)/$(go env GOARCH):${PATH}"
+  export PATH
+fi
+
+# Execute OpenShift prerequisites
+# Disable container security
+oc adm policy add-scc-to-group privileged system:authenticated system:serviceaccounts
+oc adm policy add-scc-to-group anyuid system:authenticated system:serviceaccounts
+unschedulable="$( ( oc get nodes -o name -l 'node-role.kubernetes.io/master'; ) | wc -l )"
+
+test_report_dir="${ARTIFACTS:-/tmp/artifacts}"
+mkdir -p "${test_report_dir}"
+
+# Retrieve the hostname of the server to enable kubectl testing
+SERVER=
+SERVER="$( kubectl config view | grep server | head -n 1 | awk '{print $2}' )"
+
+# shellcheck disable=SC2086
+ginkgo \
+  --flake-attempts=3 \
+  --timeout="24h" \
+  --output-interceptor-mode=none \
+  -nodes "${NODES}" -no-color ${KUBE_E2E_TEST_ARGS} \
+  "$( which k8s-e2e.test )" -- \
+  -report-dir "${test_report_dir}" \
+  -host "${SERVER}" \
+  -allowed-not-ready-nodes ${unschedulable} \
+  2>&1 | tee -a "${test_report_dir}/k8s-e2e.log"
diff --git a/openshift-hack/update-kubensenter.sh b/openshift-hack/update-kubensenter.sh
new file mode 100755
index 0000000000000..a7ca2693964be
--- /dev/null
+++ b/openshift-hack/update-kubensenter.sh
@@ -0,0 +1,139 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+source "$KUBE_ROOT/hack/lib/init.sh" + +# Convert a path relative to $KUBE_ROOT to a real path +localpath() { + realpath "$KUBE_ROOT/$1" +} + +# Configuration for fetching this file, relative to this repository root +ENVFILE=openshift-hack/kubensenter.env + +# The source of the file, relative to the remote repository root +SOURCE=utils/kubensenter/kubensenter + +# The destination of the file, relative to this repository root +DESTINATION=openshift-hack/images/hyperkube/kubensenter + +usage() { + source_env + echo "Usage:" + echo " $0 [--to-latest]" + echo + echo "Updates the local copy of $DESTINATION as configured in $ENVFILE:" + echo " REPO: $REPO" + echo " COMMIT: $COMMIT" + echo + echo "Options:" + echo " --to-latest (or env UPDATE_TO_LATEST=1)" + echo " Update $ENVFILE to the latest commit or tag in $REPO configured by the TARGET entry" + echo " (currently \"$TARGET\"), and synchronize to the updated commit." + echo " - If TARGET resolves to a branch, pin to the latest commit hash from that branch" + echo " - If TARGET resolves to a tag, pin to the latest tag that matches that pattern" + echo " - TARGET may be a glob-like expression such as \"v1.1.*\" that would match any of the following:" + echo " v1.1.0 v1.1.3 v1.1.22-rc1" + exit 1 +} + +source_env() { + source "$(localpath "$ENVFILE")" + # Intentionally global scope: + REPO=${REPO:-"github.com/containers/kubensmnt"} + COMMIT=${COMMIT:-"main"} + TARGET=${TARGET:-"main"} +} + +edit_envfile() { + local envfile=$1 + local refname=$2 + + # Shell-quote refname in case it contains any shell-special characters + local newcommit=$(printf 'COMMIT=%q' "$refname") + if [[ $# -gt 2 ]]; then + shift 2 + # Add the comment suffix + newcommit="$newcommit # $*" + fi + + local patch + patch=$(printf "%q" "$newcommit") + # Note: Using ':' since it is not a valid tag character according to git-check-ref-format(1) + sed -i "s:^COMMIT=.*:$patch:" "$envfile" +} + +update_env() { + local repouri latest refhash reftype refname + source_env + repouri=https://$REPO.git + echo "Updating to latest $TARGET from $repouri" + + latest=$(git \ + -c "versionsort.suffix=-alpha" \ + -c "versionsort.suffix=-beta" \ + -c "versionsort.suffix=-rc" \ + ls-remote \ + --heads --tags \ + --sort='-version:refname' \ + "$repouri" "$TARGET" \ + | head -n 1) + if [[ -z $latest ]]; then + echo "ERROR: No matching ref found for $TARGET" + return 1 + fi + refhash=$(cut -f1 <<<"$latest") + reftype=$(cut -d/ -f2 <<<"$latest") + refname=$(cut -d/ -f3 <<<"$latest") + + if [[ $reftype == "tags" ]]; then + echo " Latest tag is $refname ($refhash)" + edit_envfile "$ENVFILE" "$refname" "($refhash)" + else + echo " Latest on branch $refname is $refhash" + edit_envfile "$ENVFILE" "$refhash" + fi +} + +do_fetch() { + source_env + local repohost reponame uri + repohost=$(cut -d/ -f1 <<<"$REPO") + reponame=${REPO#$repohost/} + case $repohost in + github.com) + uri=https://raw.githubusercontent.com/$reponame/$COMMIT/$SOURCE + ;; + *) + echo "No support for repositories hosted on $repohost" + return 2 + ;; + esac + + echo "Fetching $DESTINATION from $uri" + curl -fsLo "$(localpath "$DESTINATION")" "$uri" +} + +main() { + local to_latest=${UPDATE_TO_LATEST:-} + if [[ $# -gt 0 ]]; then + if [[ $1 == "--help" || $1 == "-h" ]]; then + usage + elif [[ $1 == "--to-latest" ]]; then + to_latest=1 + fi + fi + + if [[ $to_latest ]]; then + update_env + fi + + do_fetch +} + +# bash modulino +[[ "${BASH_SOURCE[0]}" == "$0" ]] && main "$@" diff --git a/openshift-hack/update-test-annotations.sh 
b/openshift-hack/update-test-annotations.sh new file mode 100755 index 0000000000000..82aa9b36bbdd3 --- /dev/null +++ b/openshift-hack/update-test-annotations.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" + +kube::golang::setup_env + +# Update e2e test annotations that indicate openshift compatibility +go generate -mod vendor ./openshift-hack/e2e diff --git a/openshift-hack/verify-kubensenter.sh b/openshift-hack/verify-kubensenter.sh new file mode 100755 index 0000000000000..07093f09809e7 --- /dev/null +++ b/openshift-hack/verify-kubensenter.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" + +# Update kubensenter and error if a change is detected +"${KUBE_ROOT}"/hack/update-kubensenter.sh +git diff --quiet "${KUBE_ROOT}/openshift-hack/images/hyperkube/kubensenter" diff --git a/openshift-hack/verify-test-annotations.sh b/openshift-hack/verify-test-annotations.sh new file mode 100755 index 0000000000000..1b04bb0d60308 --- /dev/null +++ b/openshift-hack/verify-test-annotations.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" + +# Make sure that all packages that define k8s tests are properly imported +EXCLUDE_PACKAGES="\ +k8s.io/kubernetes/test/e2e/framework,\ +k8s.io/kubernetes/test/e2e/framework/debug/init,\ +k8s.io/kubernetes/test/e2e/framework/metrics/init,\ +k8s.io/kubernetes/test/e2e/framework/node/init,\ +k8s.io/kubernetes/test/e2e/framework/testfiles,\ +k8s.io/kubernetes/test/e2e/storage/external,\ +k8s.io/kubernetes/test/e2e/testing-manifests,\ +k8s.io/kubernetes/test/e2e/windows" + +GO111MODULE=on go run ./openshift-hack/cmd/go-imports-diff \ + -exclude "$EXCLUDE_PACKAGES" \ + test/e2e/e2e_test.go \ + openshift-hack/e2e/include.go + +# Verify e2e test annotations that indicate openshift compatibility +"${KUBE_ROOT}"/hack/update-test-annotations.sh +git diff --quiet "${KUBE_ROOT}/openshift-hack/e2e/annotate/generated/" diff --git a/openshift-hack/verify.sh b/openshift-hack/verify.sh new file mode 100755 index 0000000000000..9361e8f4faea9 --- /dev/null +++ b/openshift-hack/verify.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# shellcheck source=openshift-hack/lib/init.sh +source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh" + +# Required for openapi verification +PATH="$(pwd)/third_party/etcd:${PATH}" + +# Attempt to verify without docker if it is not available. +OS_RUN_WITHOUT_DOCKER= +if ! which docker &> /dev/null; then + os::log::warning "docker not available, attempting to run verify without it" + OS_RUN_WITHOUT_DOCKER=y + + # Without docker, shellcheck may need to be installed. 
+ PATH="$( os::deps::path_with_shellcheck )" +fi +export OS_RUN_WITHOUT_DOCKER + +export PATH + +ARTIFACTS="${ARTIFACTS:-/tmp/artifacts}" +mkdir -p "${ARTIFACTS}" +export KUBE_JUNIT_REPORT_DIR="${ARTIFACTS}" + +make verify diff --git a/openshift-kube-apiserver/admission/admissionenablement/admission.go b/openshift-kube-apiserver/admission/admissionenablement/admission.go new file mode 100644 index 0000000000000..a701f6d285cae --- /dev/null +++ b/openshift-kube-apiserver/admission/admissionenablement/admission.go @@ -0,0 +1,15 @@ +package admissionenablement + +import ( + "k8s.io/kubernetes/cmd/kube-apiserver/app/options" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration" +) + +func InstallOpenShiftAdmissionPlugins(o *options.ServerRunOptions) { + existingAdmissionOrder := o.Admission.GenericAdmission.RecommendedPluginOrder + o.Admission.GenericAdmission.RecommendedPluginOrder = NewOrderedKubeAdmissionPlugins(existingAdmissionOrder) + RegisterOpenshiftKubeAdmissionPlugins(o.Admission.GenericAdmission.Plugins) + customresourcevalidationregistration.RegisterCustomResourceValidation(o.Admission.GenericAdmission.Plugins) + existingDefaultOff := o.Admission.GenericAdmission.DefaultOffPlugins + o.Admission.GenericAdmission.DefaultOffPlugins = NewDefaultOffPluginsFunc(existingDefaultOff)() +} diff --git a/openshift-kube-apiserver/admission/admissionenablement/admission_config.go b/openshift-kube-apiserver/admission/admissionenablement/admission_config.go new file mode 100644 index 0000000000000..dedb9eddbc00f --- /dev/null +++ b/openshift-kube-apiserver/admission/admissionenablement/admission_config.go @@ -0,0 +1,51 @@ +package admissionenablement + +import ( + "time" + + "github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/namespaceconditions" + controlplaneapiserver "k8s.io/kubernetes/pkg/controlplane/apiserver/options" +) + +const disableSCCLevelLabel = "security.openshift.io/disable-securitycontextconstraints" + +var enforceSCCSelector labels.Selector + +func init() { + var err error + enforceSCCSelector, err = labels.Parse(disableSCCLevelLabel + " != true") + if err != nil { + panic(err) + } +} + +func SetAdmissionDefaults(o *controlplaneapiserver.CompletedOptions, informers informers.SharedInformerFactory, kubeClient kubernetes.Interface) { + // set up the decorators we need. 
This is done late and out of order because our decorators currently require informers which are not + // present until we start running + namespaceLabelDecorator := namespaceconditions.NamespaceLabelConditions{ + NamespaceClient: kubeClient.CoreV1(), + NamespaceLister: informers.Core().V1().Namespaces().Lister(), + + SkipLevelZeroNames: SkipRunLevelZeroPlugins, + SkipLevelOneNames: SkipRunLevelOnePlugins, + } + sccLabelDecorator := namespaceconditions.NewConditionalAdmissionPlugins( + kubeClient.CoreV1(), informers.Core().V1().Namespaces().Lister(), enforceSCCSelector, + "security.openshift.io/SecurityContextConstraint", "security.openshift.io/SCCExecRestrictions") + + o.Admission.GenericAdmission.Decorators = append(o.Admission.GenericAdmission.Decorators, + admission.Decorators{ + // SCC can be skipped by setting a namespace label `security.openshift.io/disable-securitycontextconstraints = true` + // This is useful for disabling SCC and using PodSecurity admission instead. + admission.DecoratorFunc(sccLabelDecorator.WithNamespaceLabelSelector), + + admission.DecoratorFunc(namespaceLabelDecorator.WithNamespaceLabelConditions), + admission.DecoratorFunc(admissiontimeout.AdmissionTimeout{Timeout: 13 * time.Second}.WithTimeout), + }, + ) +} diff --git a/openshift-kube-apiserver/admission/admissionenablement/register.go b/openshift-kube-apiserver/admission/admissionenablement/register.go new file mode 100644 index 0000000000000..e04020266b825 --- /dev/null +++ b/openshift-kube-apiserver/admission/admissionenablement/register.go @@ -0,0 +1,125 @@ +package admissionenablement + +import ( + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/plugin/resourcequota" + mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/mixedcpus" + + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy" + imagepolicyapiv1 "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1" + quotaclusterresourcequota "github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission" + authorizationrestrictusers "k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers" + quotaclusterresourceoverride "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managednode" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride" + quotarunonceduration "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/runonceduration" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/externalipranger" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/restrictedendpoints" + ingressadmission "k8s.io/kubernetes/openshift-kube-apiserver/admission/route" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/hostassignment" + projectnodeenv "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/nodeenv" + schedulerpodnodeconstraints "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/podnodeconstraints" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity" +) + +func 
RegisterOpenshiftKubeAdmissionPlugins(plugins *admission.Plugins) { + authorizationrestrictusers.Register(plugins) + hostassignment.Register(plugins) + imagepolicy.Register(plugins) + ingressadmission.Register(plugins) + managementcpusoverride.Register(plugins) + managednode.Register(plugins) + mixedcpus.Register(plugins) + projectnodeenv.Register(plugins) + quotaclusterresourceoverride.Register(plugins) + quotaclusterresourcequota.Register(plugins) + quotarunonceduration.Register(plugins) + schedulerpodnodeconstraints.Register(plugins) + sccadmission.Register(plugins) + sccadmission.RegisterSCCExecRestrictions(plugins) + externalipranger.RegisterExternalIP(plugins) + restrictedendpoints.RegisterRestrictedEndpoints(plugins) + csiinlinevolumesecurity.Register(plugins) +} + +var ( + + // these are admission plugins that cannot be applied until after the kubeapiserver starts. + // TODO if nothing comes to mind in 3.10, kill this + SkipRunLevelZeroPlugins = sets.NewString() + // these are admission plugins that cannot be applied until after the openshiftapiserver apiserver starts. + SkipRunLevelOnePlugins = sets.NewString( + imagepolicyapiv1.PluginName, // "image.openshift.io/ImagePolicy" + "quota.openshift.io/ClusterResourceQuota", + "security.openshift.io/SecurityContextConstraint", + "security.openshift.io/SCCExecRestrictions", + ) + + // openshiftAdmissionPluginsForKubeBeforeMutating are the admission plugins to add after kube admission, before mutating webhooks + openshiftAdmissionPluginsForKubeBeforeMutating = []string{ + "autoscaling.openshift.io/ClusterResourceOverride", + managementcpusoverride.PluginName, // "autoscaling.openshift.io/ManagementCPUsOverride" + "authorization.openshift.io/RestrictSubjectBindings", + "autoscaling.openshift.io/RunOnceDuration", + "scheduling.openshift.io/PodNodeConstraints", + "scheduling.openshift.io/OriginPodNodeEnvironment", + "network.openshift.io/ExternalIPRanger", + "network.openshift.io/RestrictedEndpointsAdmission", + imagepolicyapiv1.PluginName, // "image.openshift.io/ImagePolicy" + "security.openshift.io/SecurityContextConstraint", + "security.openshift.io/SCCExecRestrictions", + "route.openshift.io/IngressAdmission", + hostassignment.PluginName, // "route.openshift.io/RouteHostAssignment" + csiinlinevolumesecurity.PluginName, // "storage.openshift.io/CSIInlineVolumeSecurity" + managednode.PluginName, // "autoscaling.openshift.io/ManagedNode" + mixedcpus.PluginName, // "autoscaling.openshift.io/MixedCPUs" + } + + // openshiftAdmissionPluginsForKubeAfterResourceQuota are the plugins to add after ResourceQuota plugin + openshiftAdmissionPluginsForKubeAfterResourceQuota = []string{ + "quota.openshift.io/ClusterResourceQuota", + } + + // additionalDefaultOnPlugins is a list of plugins we turn on by default that core kube does not. + additionalDefaultOnPlugins = sets.NewString( + "NodeRestriction", + "OwnerReferencesPermissionEnforcement", + "PodNodeSelector", + "PodTolerationRestriction", + "Priority", + imagepolicyapiv1.PluginName, // "image.openshift.io/ImagePolicy" + "StorageObjectInUseProtection", + ) +) + +func NewOrderedKubeAdmissionPlugins(kubeAdmissionOrder []string) []string { + ret := []string{} + for _, curr := range kubeAdmissionOrder { + if curr == mutatingwebhook.PluginName { + ret = append(ret, openshiftAdmissionPluginsForKubeBeforeMutating...) + ret = append(ret, customresourcevalidationregistration.AllCustomResourceValidators...) 
+		}
+
+		ret = append(ret, curr)
+
+		if curr == resourcequota.PluginName {
+			ret = append(ret, openshiftAdmissionPluginsForKubeAfterResourceQuota...)
+		}
+	}
+	return ret
+}
+
+func NewDefaultOffPluginsFunc(kubeDefaultOffAdmission sets.Set[string]) func() sets.Set[string] {
+	return func() sets.Set[string] {
+		kubeOff := sets.New[string](kubeDefaultOffAdmission.UnsortedList()...)
+		kubeOff.Delete(additionalDefaultOnPlugins.List()...)
+		kubeOff.Delete(openshiftAdmissionPluginsForKubeBeforeMutating...)
+		kubeOff.Delete(openshiftAdmissionPluginsForKubeAfterResourceQuota...)
+		kubeOff.Delete(customresourcevalidationregistration.AllCustomResourceValidators...)
+		return kubeOff
+	}
+}
diff --git a/openshift-kube-apiserver/admission/admissionenablement/register_test.go b/openshift-kube-apiserver/admission/admissionenablement/register_test.go
new file mode 100644
index 0000000000000..3c24cfa9e11e9
--- /dev/null
+++ b/openshift-kube-apiserver/admission/admissionenablement/register_test.go
@@ -0,0 +1,55 @@
+package admissionenablement
+
+import (
+	"reflect"
+	"testing"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apiserver/pkg/admission"
+	genericapiserver "k8s.io/apiserver/pkg/server"
+	"k8s.io/kubernetes/pkg/kubeapiserver/options"
+
+	"github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting"
+	"k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration"
+)
+
+func TestAdmissionRegistration(t *testing.T) {
+	orderedAdmissionChain := NewOrderedKubeAdmissionPlugins(options.AllOrderedPlugins)
+	defaultOffPlugins := NewDefaultOffPluginsFunc(options.DefaultOffAdmissionPlugins())()
+	registerAllAdmissionPlugins := func(plugins *admission.Plugins) {
+		genericapiserver.RegisterAllAdmissionPlugins(plugins)
+		options.RegisterAllAdmissionPlugins(plugins)
+		RegisterOpenshiftKubeAdmissionPlugins(plugins)
+		customresourcevalidationregistration.RegisterCustomResourceValidation(plugins)
+	}
+	plugins := admission.NewPlugins()
+	registerAllAdmissionPlugins(plugins)
+
+	err := admissionregistrationtesting.AdmissionRegistrationTest(plugins, orderedAdmissionChain, sets.Set[string](defaultOffPlugins))
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestResourceQuotaBeforeClusterResourceQuota simply tests whether the ResourceQuota plugin is ordered before the ClusterResourceQuota plugin
+func TestResourceQuotaBeforeClusterResourceQuota(t *testing.T) {
+	orderedAdmissionChain := NewOrderedKubeAdmissionPlugins(options.AllOrderedPlugins)
+
+	expectedOrderedAdmissionSubChain := []string{"ResourceQuota", "quota.openshift.io/ClusterResourceQuota", "AlwaysDeny"}
+	actualOrderedAdmissionChain := extractSubChain(orderedAdmissionChain, expectedOrderedAdmissionSubChain[0])
+
+	if !reflect.DeepEqual(actualOrderedAdmissionChain, expectedOrderedAdmissionSubChain) {
+		t.Fatalf("expected %v, got %v", expectedOrderedAdmissionSubChain, actualOrderedAdmissionChain)
+	}
+}
+
+func extractSubChain(admissionChain []string, takeFrom string) []string {
+	indexOfTake := 0
+	for index, admission := range admissionChain {
+		if admission == takeFrom {
+			indexOfTake = index
+			break
+		}
+	}
+	return admissionChain[indexOfTake:]
+}
diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/groupcache_test.go b/openshift-kube-apiserver/admission/authorization/restrictusers/groupcache_test.go
new file mode 100644
index 0000000000000..1dde83cbce2a2
--- /dev/null
+++ b/openshift-kube-apiserver/admission/authorization/restrictusers/groupcache_test.go
@@ -0,0 +1,28 @@
+package restrictusers + +import ( + userv1 "github.com/openshift/api/user/v1" +) + +type fakeGroupCache struct { + groups []userv1.Group +} + +func (g fakeGroupCache) GroupsFor(user string) ([]*userv1.Group, error) { + ret := []*userv1.Group{} + for i := range g.groups { + group := &g.groups[i] + for _, currUser := range group.Users { + if user == currUser { + ret = append(ret, group) + break + } + } + + } + return ret, nil +} + +func (g fakeGroupCache) HasSynced() bool { + return true +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/intializers.go b/openshift-kube-apiserver/admission/authorization/restrictusers/intializers.go new file mode 100644 index 0000000000000..d3fdcde4a5113 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/intializers.go @@ -0,0 +1,28 @@ +package restrictusers + +import ( + "k8s.io/apiserver/pkg/admission" + + userinformer "github.com/openshift/client-go/user/informers/externalversions" +) + +func NewInitializer(userInformer userinformer.SharedInformerFactory) admission.PluginInitializer { + return &localInitializer{userInformer: userInformer} +} + +type WantsUserInformer interface { + SetUserInformer(userinformer.SharedInformerFactory) + admission.InitializationValidator +} + +type localInitializer struct { + userInformer userinformer.SharedInformerFactory +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsUserInformer); ok { + wants.SetUserInformer(i.userInformer) + } +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers.go b/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers.go new file mode 100644 index 0000000000000..4c78858203181 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers.go @@ -0,0 +1,234 @@ +package restrictusers + +import ( + "context" + "errors" + "fmt" + "io" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/apis/rbac" + + userv1 "github.com/openshift/api/user/v1" + authorizationtypedclient "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + userclient "github.com/openshift/client-go/user/clientset/versioned" + userinformer "github.com/openshift/client-go/user/informers/externalversions" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers/usercache" +) + +func Register(plugins *admission.Plugins) { + plugins.Register("authorization.openshift.io/RestrictSubjectBindings", + func(config io.Reader) (admission.Interface, error) { + return NewRestrictUsersAdmission() + }) +} + +type GroupCache interface { + GroupsFor(string) ([]*userv1.Group, error) + HasSynced() bool +} + +// restrictUsersAdmission implements admission.ValidationInterface and enforces +// restrictions on adding rolebindings in a project to permit only designated +// subjects.
+type restrictUsersAdmission struct { + *admission.Handler + + roleBindingRestrictionsGetter authorizationtypedclient.RoleBindingRestrictionsGetter + userClient userclient.Interface + kubeClient kubernetes.Interface + groupCache GroupCache +} + +var _ = admissionrestconfig.WantsRESTClientConfig(&restrictUsersAdmission{}) +var _ = WantsUserInformer(&restrictUsersAdmission{}) +var _ = initializer.WantsExternalKubeClientSet(&restrictUsersAdmission{}) +var _ = admission.ValidationInterface(&restrictUsersAdmission{}) + +// NewRestrictUsersAdmission configures an admission plugin that enforces +// restrictions on adding role bindings in a project. +func NewRestrictUsersAdmission() (admission.Interface, error) { + return &restrictUsersAdmission{ + Handler: admission.NewHandler(admission.Create, admission.Update), + }, nil +} + +func (q *restrictUsersAdmission) SetExternalKubeClientSet(c kubernetes.Interface) { + q.kubeClient = c +} + +func (q *restrictUsersAdmission) SetRESTClientConfig(restClientConfig rest.Config) { + var err error + + // RoleBindingRestriction is served as a CRD resource; any status update must use JSON + jsonClientConfig := rest.CopyConfig(&restClientConfig) + jsonClientConfig.ContentConfig.AcceptContentTypes = "application/json" + jsonClientConfig.ContentConfig.ContentType = "application/json" + + q.roleBindingRestrictionsGetter, err = authorizationtypedclient.NewForConfig(jsonClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } + + q.userClient, err = userclient.NewForConfig(&restClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } +} + +func (q *restrictUsersAdmission) SetUserInformer(userInformers userinformer.SharedInformerFactory) { + q.groupCache = usercache.NewGroupCache(userInformers.User().V1().Groups()) +} + +// subjectsDelta returns the relative complement of elementsToIgnore in +// elements (i.e., elements∖elementsToIgnore). +func subjectsDelta(elementsToIgnore, elements []rbac.Subject) []rbac.Subject { + result := []rbac.Subject{} + + for _, el := range elements { + keep := true + for _, skipEl := range elementsToIgnore { + if el == skipEl { + keep = false + break + } + } + if keep { + result = append(result, el) + } + } + + return result +} + +// Validate makes admission decisions that enforce restrictions on adding +// project-scoped role-bindings. In order for a role binding to be permitted, +// each subject in the binding must be matched by some rolebinding restriction +// in the namespace. +func (q *restrictUsersAdmission) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) { + + // We only care about rolebindings + if a.GetResource().GroupResource() != rbac.Resource("rolebindings") { + return nil + } + + // Ignore all operations that correspond to subresource actions. + if len(a.GetSubresource()) != 0 { + return nil + } + + ns := a.GetNamespace() + // Ignore cluster-level resources.
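subjectsDelta above computes which subjects an update newly adds, so only those are checked against the restrictions. A standalone sketch of the same relative-complement logic; the Subject struct here is a simplified stand-in for rbac.Subject.

package main

import "fmt"

// Subject is a simplified stand-in for rbac.Subject, for illustration only.
type Subject struct {
	Kind, Name string
}

// subjectsDelta returns the elements of `elements` that do not appear in
// elementsToIgnore, mirroring the helper in restrictusers.go.
func subjectsDelta(elementsToIgnore, elements []Subject) []Subject {
	result := []Subject{}
	for _, el := range elements {
		keep := true
		for _, skip := range elementsToIgnore {
			if el == skip {
				keep = false
				break
			}
		}
		if keep {
			result = append(result, el)
		}
	}
	return result
}

func main() {
	old := []Subject{{"User", "alice"}}
	updated := []Subject{{"User", "alice"}, {"Group", "devs"}}
	// Only the newly added subject survives the delta and gets checked.
	fmt.Println(subjectsDelta(old, updated)) // [{Group devs}]
}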
+ if len(ns) == 0 { + return nil + } + + var oldSubjects []rbac.Subject + + obj, oldObj := a.GetObject(), a.GetOldObject() + + rolebinding, ok := obj.(*rbac.RoleBinding) + if !ok { + return admission.NewForbidden(a, + fmt.Errorf("wrong object type for new rolebinding: %T", obj)) + } + + if len(rolebinding.Subjects) == 0 { + klog.V(4).Infof("No new subjects; admitting") + return nil + } + + if oldObj != nil { + oldrolebinding, ok := oldObj.(*rbac.RoleBinding) + if !ok { + return admission.NewForbidden(a, + fmt.Errorf("wrong object type for old rolebinding: %T", oldObj)) + } + oldSubjects = oldrolebinding.Subjects + } + + klog.V(4).Infof("Handling rolebinding %s/%s", + rolebinding.Namespace, rolebinding.Name) + + newSubjects := subjectsDelta(oldSubjects, rolebinding.Subjects) + if len(newSubjects) == 0 { + klog.V(4).Infof("No new subjects; admitting") + return nil + } + + // The RoleBindingRestrictions admission plugin is DefaultAllow, hence RBRs can't be served from an + // informer: it's impossible to know whether the cache is up to date + roleBindingRestrictionList, err := q.roleBindingRestrictionsGetter.RoleBindingRestrictions(ns). + List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("could not list rolebinding restrictions: %v", err)) + } + if len(roleBindingRestrictionList.Items) == 0 { + klog.V(4).Infof("No rolebinding restrictions specified; admitting") + return nil + } + + checkers := []SubjectChecker{} + for _, rbr := range roleBindingRestrictionList.Items { + checker, err := NewSubjectChecker(&rbr.Spec) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("could not create rolebinding restriction subject checker: %v", err)) + } + checkers = append(checkers, checker) + } + + roleBindingRestrictionContext, err := newRoleBindingRestrictionContext(ns, + q.kubeClient, q.userClient.UserV1(), q.groupCache) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("could not create rolebinding restriction context: %v", err)) + } + + checker := NewUnionSubjectChecker(checkers) + + errs := []error{} + for _, subject := range newSubjects { + allowed, err := checker.Allowed(subject, roleBindingRestrictionContext) + if err != nil { + errs = append(errs, err) + } + if !allowed { + errs = append(errs, + fmt.Errorf("rolebindings to %s %q are not allowed in project %q", + subject.Kind, subject.Name, ns)) + } + } + if len(errs) != 0 { + return admission.NewForbidden(a, kerrors.NewAggregate(errs)) + } + + klog.V(4).Infof("All new subjects are allowed; admitting") + + return nil +} + +func (q *restrictUsersAdmission) ValidateInitialization() error { + if q.kubeClient == nil { + return errors.New("RestrictUsersAdmission plugin requires a Kubernetes client") + } + if q.roleBindingRestrictionsGetter == nil { + return errors.New("RestrictUsersAdmission plugin requires an OpenShift client") + } + if q.userClient == nil { + return errors.New("RestrictUsersAdmission plugin requires an OpenShift user client") + } + if q.groupCache == nil { + return errors.New("RestrictUsersAdmission plugin requires a group cache") + } + + return nil +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers_test.go b/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers_test.go new file mode 100644 index 0000000000000..50dd6eb5faea9 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers_test.go @@ -0,0 +1,404 @@ +package restrictusers + +import ( + "context" + "fmt" +
"strings" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/kubernetes/pkg/apis/rbac" + + authorizationv1 "github.com/openshift/api/authorization/v1" + userv1 "github.com/openshift/api/user/v1" + fakeauthorizationclient "github.com/openshift/client-go/authorization/clientset/versioned/fake" + fakeuserclient "github.com/openshift/client-go/user/clientset/versioned/fake" +) + +func TestAdmission(t *testing.T) { + var ( + userAlice = userv1.User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Alice", + Labels: map[string]string{"foo": "bar"}, + }, + } + userAliceSubj = rbac.Subject{ + Kind: rbac.UserKind, + Name: "Alice", + } + + userBob = userv1.User{ + ObjectMeta: metav1.ObjectMeta{Name: "Bob"}, + Groups: []string{"group"}, + } + userBobSubj = rbac.Subject{ + Kind: rbac.UserKind, + Name: "Bob", + } + + group = userv1.Group{ + ObjectMeta: metav1.ObjectMeta{ + Name: "group", + Labels: map[string]string{"baz": "quux"}, + }, + Users: []string{userBobSubj.Name}, + } + groupSubj = rbac.Subject{ + Kind: rbac.GroupKind, + Name: "group", + } + + serviceaccount = corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "serviceaccount", + Labels: map[string]string{"xyzzy": "thud"}, + }, + } + serviceaccountSubj = rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Namespace: "namespace", + Name: "serviceaccount", + } + ) + + testCases := []struct { + name string + expectedErr string + + object runtime.Object + oldObject runtime.Object + kind schema.GroupVersionKind + resource schema.GroupVersionResource + namespace string + subresource string + kubeObjects []runtime.Object + authorizationObjects []runtime.Object + userObjects []runtime.Object + }{ + { + name: "ignore (allow) if subresource is nonempty", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{userAliceSubj}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "subresource", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + }, + { + name: "ignore (allow) cluster-scoped rolebinding", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{userAliceSubj}, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + }, + { + name: "allow if the namespace has no rolebinding restrictions", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + 
userBobSubj, + groupSubj, + serviceaccountSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + }, + { + name: "allow if any rolebinding with the subject already exists", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + groupSubj, + serviceaccountSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + groupSubj, + serviceaccountSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + authorizationObjects: []runtime.Object{ + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bogus-matcher", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{}, + }, + }, + }, + }, + { + name: "allow a user, group, or service account in a rolebinding if a literal matches", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + serviceaccountSubj, + groupSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + authorizationObjects: []runtime.Object{ + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "match-users", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Users: []string{userAlice.Name}, + }, + }, + }, + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "match-groups", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + GroupRestriction: &authorizationv1.GroupRestriction{ + Groups: []string{group.Name}, + }, + }, + }, + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "match-serviceaccounts", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Name: serviceaccount.Name, + Namespace: 
serviceaccount.Namespace, + }, + }, + }, + }, + }, + }, + }, + { + name: "prohibit user without a matching user literal", + expectedErr: fmt.Sprintf("rolebindings to %s %q are not allowed", + userAliceSubj.Kind, userAliceSubj.Name), + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + authorizationObjects: []runtime.Object{ + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "match-users-bob", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Users: []string{userBobSubj.Name}, + }, + }, + }, + }, + userObjects: []runtime.Object{ + &userAlice, + &userBob, + }, + }, + } + + stopCh := make(chan struct{}) + defer close(stopCh) + + for _, tc := range testCases { + kclientset := fake.NewSimpleClientset(tc.kubeObjects...) + fakeUserClient := fakeuserclient.NewSimpleClientset(tc.userObjects...) + fakeAuthorizationClient := fakeauthorizationclient.NewSimpleClientset(tc.authorizationObjects...) + + plugin, err := NewRestrictUsersAdmission() + if err != nil { + t.Errorf("unexpected error initializing admission plugin: %v", err) + } + + plugin.(*restrictUsersAdmission).kubeClient = kclientset + plugin.(*restrictUsersAdmission).roleBindingRestrictionsGetter = fakeAuthorizationClient.AuthorizationV1() + plugin.(*restrictUsersAdmission).userClient = fakeUserClient + plugin.(*restrictUsersAdmission).groupCache = fakeGroupCache{} + + err = admission.ValidateInitialization(plugin) + if err != nil { + t.Errorf("unexpected error validating admission plugin: %v", err) + } + + attributes := admission.NewAttributesRecord( + tc.object, + tc.oldObject, + tc.kind, + tc.namespace, + tc.name, + tc.resource, + tc.subresource, + admission.Create, + nil, + false, + &user.DefaultInfo{}, + ) + + err = plugin.(admission.ValidationInterface).Validate(context.TODO(), attributes, nil) + switch { + case len(tc.expectedErr) == 0 && err == nil: + case len(tc.expectedErr) == 0 && err != nil: + t.Errorf("%s: unexpected error: %v", tc.name, err) + case len(tc.expectedErr) != 0 && err == nil: + t.Errorf("%s: missing error: %v", tc.name, tc.expectedErr) + case len(tc.expectedErr) != 0 && err != nil && + !strings.Contains(err.Error(), tc.expectedErr): + t.Errorf("%s: missing error: expected %v, got %v", + tc.name, tc.expectedErr, err) + } + } +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker.go b/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker.go new file mode 100644 index 0000000000000..2e10e182b9de9 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker.go @@ -0,0 +1,312 @@ +package restrictusers + +import ( + "context" + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + kerrors "k8s.io/apimachinery/pkg/util/errors" + 
"k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/apis/rbac" + + authorizationv1 "github.com/openshift/api/authorization/v1" + userv1 "github.com/openshift/api/user/v1" + userclient "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" +) + +// SubjectChecker determines whether rolebindings on a subject (user, group, or +// service account) are allowed in a project. +type SubjectChecker interface { + Allowed(rbac.Subject, *RoleBindingRestrictionContext) (bool, error) +} + +// UnionSubjectChecker represents the union of zero or more SubjectCheckers. +type UnionSubjectChecker []SubjectChecker + +// NewUnionSubjectChecker returns a new UnionSubjectChecker. +func NewUnionSubjectChecker(checkers []SubjectChecker) UnionSubjectChecker { + return UnionSubjectChecker(checkers) +} + +// Allowed determines whether the given subject is allowed in rolebindings in +// the project. +func (checkers UnionSubjectChecker) Allowed(subject rbac.Subject, ctx *RoleBindingRestrictionContext) (bool, error) { + errs := []error{} + for _, checker := range []SubjectChecker(checkers) { + allowed, err := checker.Allowed(subject, ctx) + if err != nil { + errs = append(errs, err) + } else if allowed { + return true, nil + } + } + + return false, kerrors.NewAggregate(errs) +} + +// RoleBindingRestrictionContext holds context that is used when determining +// whether a RoleBindingRestriction allows rolebindings on a particular subject. +type RoleBindingRestrictionContext struct { + userClient userclient.UserV1Interface + kclient kubernetes.Interface + + // groupCache maps user name to groups. + groupCache GroupCache + + // userToLabels maps user name to labels.Set. + userToLabelSet map[string]labels.Set + + // groupToLabels maps group name to labels.Set. + groupToLabelSet map[string]labels.Set + + // namespace is the namespace for which the RoleBindingRestriction makes + // determinations. + namespace string +} + +// NewRoleBindingRestrictionContext returns a new RoleBindingRestrictionContext +// object. +func newRoleBindingRestrictionContext(ns string, kc kubernetes.Interface, userClient userclient.UserV1Interface, groupCache GroupCache) (*RoleBindingRestrictionContext, error) { + return &RoleBindingRestrictionContext{ + namespace: ns, + kclient: kc, + userClient: userClient, + groupCache: groupCache, + userToLabelSet: map[string]labels.Set{}, + groupToLabelSet: map[string]labels.Set{}, + }, nil +} + +// labelSetForUser returns the label set for the given user subject. +func (ctx *RoleBindingRestrictionContext) labelSetForUser(subject rbac.Subject) (labels.Set, error) { + if subject.Kind != rbac.UserKind { + return labels.Set{}, fmt.Errorf("not a user: %q", subject.Name) + } + + labelSet, ok := ctx.userToLabelSet[subject.Name] + if ok { + return labelSet, nil + } + + user, err := ctx.userClient.Users().Get(context.TODO(), subject.Name, metav1.GetOptions{}) + if err != nil { + return labels.Set{}, err + } + + ctx.userToLabelSet[subject.Name] = labels.Set(user.Labels) + + return ctx.userToLabelSet[subject.Name], nil +} + +// groupsForUser returns the groups for the given user subject. 
+func (ctx *RoleBindingRestrictionContext) groupsForUser(subject rbac.Subject) ([]*userv1.Group, error) { + if subject.Kind != rbac.UserKind { + return []*userv1.Group{}, fmt.Errorf("not a user: %q", subject.Name) + } + + err := wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { + return ctx.groupCache.HasSynced(), nil + }) + if err != nil { + return nil, fmt.Errorf("groups.user.openshift.io cache is not synchronized") + } + + return ctx.groupCache.GroupsFor(subject.Name) +} + +// labelSetForGroup returns the label set for the given group subject. +func (ctx *RoleBindingRestrictionContext) labelSetForGroup(subject rbac.Subject) (labels.Set, error) { + if subject.Kind != rbac.GroupKind { + return labels.Set{}, fmt.Errorf("not a group: %q", subject.Name) + } + + labelSet, ok := ctx.groupToLabelSet[subject.Name] + if ok { + return labelSet, nil + } + + group, err := ctx.userClient.Groups().Get(context.TODO(), subject.Name, metav1.GetOptions{}) + if err != nil { + return labels.Set{}, err + } + + ctx.groupToLabelSet[subject.Name] = labels.Set(group.Labels) + + return ctx.groupToLabelSet[subject.Name], nil +} + +// UserSubjectChecker determines whether a user subject is allowed in +// rolebindings in the project. +type UserSubjectChecker struct { + userRestriction *authorizationv1.UserRestriction +} + +// NewUserSubjectChecker returns a new UserSubjectChecker. +func NewUserSubjectChecker(userRestriction *authorizationv1.UserRestriction) UserSubjectChecker { + return UserSubjectChecker{userRestriction: userRestriction} +} + +// Allowed determines whether the given user subject is allowed in rolebindings +// in the project. +func (checker UserSubjectChecker) Allowed(subject rbac.Subject, ctx *RoleBindingRestrictionContext) (bool, error) { + if subject.Kind != rbac.UserKind { + return false, nil + } + + for _, userName := range checker.userRestriction.Users { + if subject.Name == userName { + return true, nil + } + } + + if len(checker.userRestriction.Groups) != 0 { + subjectGroups, err := ctx.groupsForUser(subject) + if err != nil { + return false, err + } + + for _, groupName := range checker.userRestriction.Groups { + for _, group := range subjectGroups { + if group.Name == groupName { + return true, nil + } + } + } + } + + if len(checker.userRestriction.Selectors) != 0 { + labelSet, err := ctx.labelSetForUser(subject) + if err != nil { + return false, err + } + + for _, labelSelector := range checker.userRestriction.Selectors { + selector, err := metav1.LabelSelectorAsSelector(&labelSelector) + if err != nil { + return false, err + } + + if selector.Matches(labelSet) { + return true, nil + } + } + } + + return false, nil +} + +// GroupSubjectChecker determines whether a group subject is allowed in +// rolebindings in the project. +type GroupSubjectChecker struct { + groupRestriction *authorizationv1.GroupRestriction +} + +// NewGroupSubjectChecker returns a new GroupSubjectChecker. +func NewGroupSubjectChecker(groupRestriction *authorizationv1.GroupRestriction) GroupSubjectChecker { + return GroupSubjectChecker{groupRestriction: groupRestriction} +} + +// Allowed determines whether the given group subject is allowed in rolebindings +// in the project. 
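The Selectors branch of UserSubjectChecker.Allowed converts each metav1.LabelSelector and matches it against the user's labels. A minimal, self-contained sketch of that conversion and match using only apimachinery; the label values are illustrative.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// An illustrative restriction selector, as a UserRestriction.Selectors entry would carry.
	sel := metav1.LabelSelector{MatchLabels: map[string]string{"team": "platform"}}

	selector, err := metav1.LabelSelectorAsSelector(&sel)
	if err != nil {
		panic(err)
	}

	// The checker matches the converted selector against the user's labels.
	userLabels := labels.Set{"team": "platform", "region": "eu"}
	fmt.Println(selector.Matches(userLabels)) // true
}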
+func (checker GroupSubjectChecker) Allowed(subject rbac.Subject, ctx *RoleBindingRestrictionContext) (bool, error) { + if subject.Kind != rbac.GroupKind { + return false, nil + } + + for _, groupName := range checker.groupRestriction.Groups { + if subject.Name == groupName { + return true, nil + } + } + + if len(checker.groupRestriction.Selectors) != 0 { + labelSet, err := ctx.labelSetForGroup(subject) + if err != nil { + return false, err + } + + for _, labelSelector := range checker.groupRestriction.Selectors { + selector, err := metav1.LabelSelectorAsSelector(&labelSelector) + if err != nil { + return false, err + } + + if selector.Matches(labelSet) { + return true, nil + } + } + } + + return false, nil +} + +// ServiceAccountSubjectChecker determines whether a serviceaccount subject is +// allowed in rolebindings in the project. +type ServiceAccountSubjectChecker struct { + serviceAccountRestriction *authorizationv1.ServiceAccountRestriction +} + +// NewServiceAccountSubjectChecker returns a new ServiceAccountSubjectChecker. +func NewServiceAccountSubjectChecker(serviceAccountRestriction *authorizationv1.ServiceAccountRestriction) ServiceAccountSubjectChecker { + return ServiceAccountSubjectChecker{ + serviceAccountRestriction: serviceAccountRestriction, + } +} + +// Allowed determines whether the given serviceaccount subject is allowed in +// rolebindings in the project. +func (checker ServiceAccountSubjectChecker) Allowed(subject rbac.Subject, ctx *RoleBindingRestrictionContext) (bool, error) { + if subject.Kind != rbac.ServiceAccountKind { + return false, nil + } + + subjectNamespace := subject.Namespace + if len(subjectNamespace) == 0 { + // If a RoleBinding has a subject that is a ServiceAccount with + // no namespace specified, the namespace will be defaulted to + // that of the RoleBinding. However, admission control plug-ins + // execute before this happens, so in order not to reject such + // subjects erroneously, we copy the logic here of using the + // RoleBinding's namespace if the subject's is empty. + subjectNamespace = ctx.namespace + } + + for _, namespace := range checker.serviceAccountRestriction.Namespaces { + if subjectNamespace == namespace { + return true, nil + } + } + + for _, serviceAccountRef := range checker.serviceAccountRestriction.ServiceAccounts { + serviceAccountNamespace := serviceAccountRef.Namespace + if len(serviceAccountNamespace) == 0 { + serviceAccountNamespace = ctx.namespace + } + + if subject.Name == serviceAccountRef.Name && + subjectNamespace == serviceAccountNamespace { + return true, nil + } + } + + return false, nil +} + +// NewSubjectChecker returns a new SubjectChecker. 
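The namespace-defaulting rule above (an empty namespace on the subject or on a ServiceAccountReference falls back to the binding's namespace) is easy to get wrong. A toy sketch of just that rule, with simplified types standing in for the real structs:

package main

import "fmt"

// saAllowed mirrors the defaulting in ServiceAccountSubjectChecker.Allowed:
// an empty namespace is treated as the RoleBinding's (context) namespace,
// on both the subject and each allowed reference.
func saAllowed(subjectNS, subjectName, ctxNS string, allowedRefs [][2]string) bool {
	if subjectNS == "" {
		subjectNS = ctxNS
	}
	for _, ref := range allowedRefs {
		refNS, refName := ref[0], ref[1]
		if refNS == "" {
			refNS = ctxNS
		}
		if subjectName == refName && subjectNS == refNS {
			return true
		}
	}
	return false
}

func main() {
	// Both the reference and the subject default to the binding's namespace.
	fmt.Println(saAllowed("", "builder", "myproject", [][2]string{{"", "builder"}})) // true
	// An explicit, different namespace on the subject does not match.
	fmt.Println(saAllowed("other", "builder", "myproject", [][2]string{{"", "builder"}})) // false
}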
+func NewSubjectChecker(spec *authorizationv1.RoleBindingRestrictionSpec) (SubjectChecker, error) { + switch { + case spec.UserRestriction != nil: + return NewUserSubjectChecker(spec.UserRestriction), nil + + case spec.GroupRestriction != nil: + return NewGroupSubjectChecker(spec.GroupRestriction), nil + + case spec.ServiceAccountRestriction != nil: + return NewServiceAccountSubjectChecker(spec.ServiceAccountRestriction), nil + } + + return nil, fmt.Errorf("invalid RoleBindingRestrictionSpec: %v", spec) +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker_test.go b/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker_test.go new file mode 100644 index 0000000000000..4580d3582f93e --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker_test.go @@ -0,0 +1,349 @@ +package restrictusers + +import ( + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/kubernetes/pkg/apis/rbac" + + authorizationv1 "github.com/openshift/api/authorization/v1" + userv1 "github.com/openshift/api/user/v1" + fakeuserclient "github.com/openshift/client-go/user/clientset/versioned/fake" +) + +func mustNewSubjectChecker(t *testing.T, spec *authorizationv1.RoleBindingRestrictionSpec) SubjectChecker { + checker, err := NewSubjectChecker(spec) + if err != nil { + t.Errorf("unexpected error from NewChecker: %v, spec: %#v", err, spec) + } + + return checker +} + +func TestSubjectCheckers(t *testing.T) { + var ( + userBobRef = rbac.Subject{ + Kind: rbac.UserKind, + Name: "Bob", + } + userAliceRef = rbac.Subject{ + Kind: rbac.UserKind, + Name: "Alice", + } + groupRef = rbac.Subject{ + Kind: rbac.GroupKind, + Name: "group", + } + serviceaccountRef = rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Namespace: "namespace", + Name: "serviceaccount", + } + group = userv1.Group{ + ObjectMeta: metav1.ObjectMeta{ + Name: "group", + Labels: map[string]string{"baz": "quux"}, + }, + Users: []string{userBobRef.Name}, + } + userObjects = []runtime.Object{ + &userv1.User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Alice", + Labels: map[string]string{"foo": "bar"}, + }, + }, + &userv1.User{ + ObjectMeta: metav1.ObjectMeta{Name: "Bob"}, + Groups: []string{"group"}, + }, + &group, + } + kubeObjects = []runtime.Object{ + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "serviceaccount", + Labels: map[string]string{"xyzzy": "thud"}, + }, + }, + } + ) + + testCases := []struct { + name string + checker SubjectChecker + subject rbac.Subject + shouldAllow bool + }{ + { + name: "allow regular user by literal name match", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Users: []string{userAliceRef.Name}, + }, + }), + subject: userAliceRef, + shouldAllow: true, + }, + { + name: "allow regular user by group membership", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Groups: []string{groupRef.Name}, + }, + }), + subject: userBobRef, + shouldAllow: true, + }, + { + name: "prohibit regular user when another user matches on group membership", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Groups: 
[]string{groupRef.Name}, + }, + }), + subject: userAliceRef, + shouldAllow: false, + }, + { + name: "allow regular user by label selector match", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Selectors: []metav1.LabelSelector{ + {MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + }), + subject: userAliceRef, + shouldAllow: true, + }, + { + name: "prohibit regular user when another user matches on label selector", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Selectors: []metav1.LabelSelector{ + {MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + }), + subject: userBobRef, + shouldAllow: false, + }, + { + name: "allow regular group by literal name match", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + GroupRestriction: &authorizationv1.GroupRestriction{ + Groups: []string{groupRef.Name}, + }, + }), + subject: groupRef, + shouldAllow: true, + }, + { + name: "allow regular group by label selector match", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + GroupRestriction: &authorizationv1.GroupRestriction{ + Selectors: []metav1.LabelSelector{ + {MatchLabels: map[string]string{"baz": "quux"}}, + }, + }, + }), + subject: groupRef, + shouldAllow: true, + }, + { + name: "allow service account with explicit namespace by match on literal name and explicit namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Name: serviceaccountRef.Name, + Namespace: serviceaccountRef.Namespace, + }, + }, + }, + }), + subject: serviceaccountRef, + shouldAllow: true, + }, + { + name: "allow service account with explicit namespace by match on literal name and implicit namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + {Name: serviceaccountRef.Name}, + }, + }, + }), + subject: serviceaccountRef, + shouldAllow: true, + }, + { + name: "prohibit service account with explicit namespace where literal name matches but explicit namespace does not", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Namespace: serviceaccountRef.Namespace, + Name: serviceaccountRef.Name, + }, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Namespace: "othernamespace", + Name: serviceaccountRef.Name, + }, + shouldAllow: false, + }, + { + name: "prohibit service account with explicit namespace where literal name matches but implicit namespace does not", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + {Name: serviceaccountRef.Name}, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Namespace: "othernamespace", + Name: serviceaccountRef.Name, + }, + shouldAllow: false, + }, + { + name: "allow service account with implicit namespace 
by match on literal name and explicit namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Name: serviceaccountRef.Name, + Namespace: serviceaccountRef.Namespace, + }, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Name: serviceaccountRef.Name, + }, + shouldAllow: true, + }, + { + name: "allow service account with implicit namespace by match on literal name and implicit namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + {Name: serviceaccountRef.Name}, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Name: serviceaccountRef.Name, + }, + shouldAllow: true, + }, + { + name: "prohibit service account with implicit namespace where literal name matches but explicit namespace does not", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Namespace: "othernamespace", + Name: serviceaccountRef.Name, + }, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Name: serviceaccountRef.Name, + }, + shouldAllow: false, + }, + { + name: "prohibit service account with explicit namespace where explicit namespace matches but literal name does not", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Namespace: serviceaccountRef.Namespace, + Name: "othername", + }, + }, + }, + }), + subject: serviceaccountRef, + shouldAllow: false, + }, + { + name: "allow service account by match on namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + Namespaces: []string{serviceaccountRef.Namespace}, + }, + }), + subject: serviceaccountRef, + shouldAllow: true, + }, + } + + stopCh := make(chan struct{}) + defer close(stopCh) + + kclient := fake.NewSimpleClientset(kubeObjects...) + fakeUserClient := fakeuserclient.NewSimpleClientset(userObjects...) + groupCache := fakeGroupCache{groups: []userv1.Group{group}} + // This is a terrible, horrible, no-good, very bad hack to avoid a race + // condition between the test "allow regular user by group membership" + // and the group cache's initialisation. 
+ for { + if groups, _ := groupCache.GroupsFor(group.Users[0]); len(groups) == 1 { + break + } + time.Sleep(10 * time.Millisecond) + } + + ctx, err := newRoleBindingRestrictionContext("namespace", + kclient, fakeUserClient.UserV1(), groupCache) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + for _, tc := range testCases { + allowed, err := tc.checker.Allowed(tc.subject, ctx) + if err != nil { + t.Errorf("test case %v: unexpected error: %v", tc.name, err) + } + if allowed && !tc.shouldAllow { + t.Errorf("test case %v: subject allowed but should be prohibited", tc.name) + } + if !allowed && tc.shouldAllow { + t.Errorf("test case %v: subject prohibited but should be allowed", tc.name) + } + } +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/usercache/groups.go b/openshift-kube-apiserver/admission/authorization/restrictusers/usercache/groups.go new file mode 100644 index 0000000000000..99a8156be3053 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/usercache/groups.go @@ -0,0 +1,55 @@ +package usercache + +import ( + "fmt" + + "k8s.io/client-go/tools/cache" + + userapi "github.com/openshift/api/user/v1" + userinformer "github.com/openshift/client-go/user/informers/externalversions/user/v1" +) + +// GroupCache is a skin on an indexer to provide the reverse index from user to groups. +// Once we work out a cleaner way to extend a lister, this should live there. +type GroupCache struct { + indexer cache.Indexer + groupsSynced cache.InformerSynced +} + +const ByUserIndexName = "ByUser" + +// ByUserIndexKeys is a cache.IndexFunc for Groups that indexes groups by User, so that a direct cache lookup +// using a User.Name will return all Groups that User is a member of. +func ByUserIndexKeys(obj interface{}) ([]string, error) { + group, ok := obj.(*userapi.Group) + if !ok { + return nil, fmt.Errorf("unexpected type: %v", obj) + } + + return group.Users, nil +} + +func NewGroupCache(groupInformer userinformer.GroupInformer) *GroupCache { + return &GroupCache{ + indexer: groupInformer.Informer().GetIndexer(), + groupsSynced: groupInformer.Informer().HasSynced, + } +} + +func (c *GroupCache) GroupsFor(username string) ([]*userapi.Group, error) { + objs, err := c.indexer.ByIndex(ByUserIndexName, username) + if err != nil { + return nil, err + } + + groups := make([]*userapi.Group, len(objs)) + for i := range objs { + groups[i] = objs[i].(*userapi.Group) + } + + return groups, nil +} + +func (c *GroupCache) HasSynced() bool { + return c.groupsSynced() +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/doc.go new file mode 100644 index 0000000000000..7f2a6f888d472 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package clusterresourceoverride is the internal version of the API.
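GroupsFor above is a plain reverse lookup once the ByUser index is installed on the informer's indexer. A minimal sketch of the same index built directly on a client-go indexer; the informer wiring is omitted, and the group object is illustrative.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"

	userv1 "github.com/openshift/api/user/v1"
)

func main() {
	// Build an indexer with the same ByUser index the GroupCache relies on.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"ByUser": func(obj interface{}) ([]string, error) {
			return obj.(*userv1.Group).Users, nil
		},
	})

	_ = indexer.Add(&userv1.Group{
		ObjectMeta: metav1.ObjectMeta{Name: "devs"},
		Users:      []string{"alice", "bob"},
	})

	// Reverse lookup: which groups is "alice" a member of?
	objs, _ := indexer.ByIndex("ByUser", "alice")
	for _, o := range objs {
		fmt.Println(o.(*userv1.Group).Name) // devs
	}
}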
+package clusterresourceoverride diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/name.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/name.go new file mode 100644 index 0000000000000..f136def581ed5 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/name.go @@ -0,0 +1,4 @@ +package clusterresourceoverride + +const PluginName = "autoscaling.openshift.io/ClusterResourceOverride" +const ConfigKind = "ClusterResourceOverrideConfig" diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/register.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/register.go new file mode 100644 index 0000000000000..5308853cfd134 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/register.go @@ -0,0 +1,23 @@ +package clusterresourceoverride + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var GroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: runtime.APIVersionInternal} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ClusterResourceOverrideConfig{}, + ) + return nil +} + +func (obj *ClusterResourceOverrideConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/types.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/types.go new file mode 100644 index 0000000000000..3718e265caafa --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/types.go @@ -0,0 +1,24 @@ +package clusterresourceoverride + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterResourceOverrideConfig is the configuration for the ClusterResourceOverride +// admission controller which overrides user-provided container request/limit values. +type ClusterResourceOverrideConfig struct { + metav1.TypeMeta + // For each of the following, if a non-zero ratio is specified then the initial + // value (if any) in the pod spec is overwritten according to the ratio. + // LimitRange defaults are merged prior to the override. + // + // LimitCPUToMemoryPercent (if > 0) overrides the CPU limit to a ratio of the memory limit; + // 100% overrides CPU to 1 core per 1GiB of RAM. This is done before overriding the CPU request. 
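The 1-core-per-1GiB rule for LimitCPUToMemoryPercent is easiest to see with concrete numbers. A sketch of the stated arithmetic only, not the plugin's actual implementation:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	limitCPUToMemoryPercent := int64(100)

	memLimit := resource.MustParse("512Mi")

	// 100% == 1 core (1000m) per 1GiB of memory, scaled by the configured percent.
	const gib = 1024 * 1024 * 1024
	cpuMilli := memLimit.Value() * limitCPUToMemoryPercent * 1000 / (gib * 100)

	fmt.Printf("%dm\n", cpuMilli) // 500m for a 512Mi memory limit at 100%
}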
+ LimitCPUToMemoryPercent int64 + // CPURequestToLimitPercent (if > 0) overrides CPU request to a percentage of CPU limit + CPURequestToLimitPercent int64 + // MemoryRequestToLimitPercent (if > 0) overrides memory request to a percentage of memory limit + MemoryRequestToLimitPercent int64 +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/doc.go new file mode 100644 index 0000000000000..7397986b23605 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/register.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/register.go new file mode 100644 index 0000000000000..91d44566e3476 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/register.go @@ -0,0 +1,27 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" +) + +func (obj *ClusterResourceOverrideConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +var GroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + clusterresourceoverride.Install, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ClusterResourceOverrideConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/swagger_doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/swagger_doc.go new file mode 100644 index 0000000000000..f909b0db2ee4f --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/swagger_doc.go @@ -0,0 +1,17 @@ +package v1 + +// This file contains methods that can be used by the go-restful package to generate Swagger +// documentation for the object types found in 'types.go'. This file is automatically generated +// by hack/update-generated-swagger-descriptions.sh, which should be run after a full build of OpenShift. +// ==== DO NOT EDIT THIS FILE MANUALLY ==== + +var map_ClusterResourceOverrideConfig = map[string]string{ + "": "ClusterResourceOverrideConfig is the configuration for the ClusterResourceOverride admission controller which overrides user-provided container request/limit values.", + "limitCPUToMemoryPercent": "For each of the following, if a non-zero ratio is specified then the initial value (if any) in the pod spec is overwritten according to the ratio. LimitRange defaults are merged prior to the override.\n\nLimitCPUToMemoryPercent (if > 0) overrides the CPU limit to a ratio of the memory limit; 100% overrides CPU to 1 core per 1GiB of RAM.
This is done before overriding the CPU request.", + "cpuRequestToLimitPercent": "CPURequestToLimitPercent (if > 0) overrides CPU request to a percentage of CPU limit", + "memoryRequestToLimitPercent": "MemoryRequestToLimitPercent (if > 0) overrides memory request to a percentage of memory limit", +} + +func (ClusterResourceOverrideConfig) SwaggerDoc() map[string]string { + return map_ClusterResourceOverrideConfig +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/types.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/types.go new file mode 100644 index 0000000000000..9a56034174e15 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/types.go @@ -0,0 +1,24 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterResourceOverrideConfig is the configuration for the ClusterResourceOverride +// admission controller which overrides user-provided container request/limit values. +type ClusterResourceOverrideConfig struct { + metav1.TypeMeta `json:",inline"` + // For each of the following, if a non-zero ratio is specified then the initial + // value (if any) in the pod spec is overwritten according to the ratio. + // LimitRange defaults are merged prior to the override. + // + // LimitCPUToMemoryPercent (if > 0) overrides the CPU limit to a ratio of the memory limit; + // 100% overrides CPU to 1 core per 1GiB of RAM. This is done before overriding the CPU request. + LimitCPUToMemoryPercent int64 `json:"limitCPUToMemoryPercent"` + // CPURequestToLimitPercent (if > 0) overrides CPU request to a percentage of CPU limit + CPURequestToLimitPercent int64 `json:"cpuRequestToLimitPercent"` + // MemoryRequestToLimitPercent (if > 0) overrides memory request to a percentage of memory limit + MemoryRequestToLimitPercent int64 `json:"memoryRequestToLimitPercent"` +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/zz_generated.conversion.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/zz_generated.conversion.go new file mode 100644 index 0000000000000..27dc8863cd2c1 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/zz_generated.conversion.go @@ -0,0 +1,72 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + clusterresourceoverride "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. 
+// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*ClusterResourceOverrideConfig)(nil), (*clusterresourceoverride.ClusterResourceOverrideConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ClusterResourceOverrideConfig_To_clusterresourceoverride_ClusterResourceOverrideConfig(a.(*ClusterResourceOverrideConfig), b.(*clusterresourceoverride.ClusterResourceOverrideConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clusterresourceoverride.ClusterResourceOverrideConfig)(nil), (*ClusterResourceOverrideConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clusterresourceoverride_ClusterResourceOverrideConfig_To_v1_ClusterResourceOverrideConfig(a.(*clusterresourceoverride.ClusterResourceOverrideConfig), b.(*ClusterResourceOverrideConfig), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_ClusterResourceOverrideConfig_To_clusterresourceoverride_ClusterResourceOverrideConfig(in *ClusterResourceOverrideConfig, out *clusterresourceoverride.ClusterResourceOverrideConfig, s conversion.Scope) error { + out.LimitCPUToMemoryPercent = in.LimitCPUToMemoryPercent + out.CPURequestToLimitPercent = in.CPURequestToLimitPercent + out.MemoryRequestToLimitPercent = in.MemoryRequestToLimitPercent + return nil +} + +// Convert_v1_ClusterResourceOverrideConfig_To_clusterresourceoverride_ClusterResourceOverrideConfig is an autogenerated conversion function. +func Convert_v1_ClusterResourceOverrideConfig_To_clusterresourceoverride_ClusterResourceOverrideConfig(in *ClusterResourceOverrideConfig, out *clusterresourceoverride.ClusterResourceOverrideConfig, s conversion.Scope) error { + return autoConvert_v1_ClusterResourceOverrideConfig_To_clusterresourceoverride_ClusterResourceOverrideConfig(in, out, s) +} + +func autoConvert_clusterresourceoverride_ClusterResourceOverrideConfig_To_v1_ClusterResourceOverrideConfig(in *clusterresourceoverride.ClusterResourceOverrideConfig, out *ClusterResourceOverrideConfig, s conversion.Scope) error { + out.LimitCPUToMemoryPercent = in.LimitCPUToMemoryPercent + out.CPURequestToLimitPercent = in.CPURequestToLimitPercent + out.MemoryRequestToLimitPercent = in.MemoryRequestToLimitPercent + return nil +} + +// Convert_clusterresourceoverride_ClusterResourceOverrideConfig_To_v1_ClusterResourceOverrideConfig is an autogenerated conversion function. +func Convert_clusterresourceoverride_ClusterResourceOverrideConfig_To_v1_ClusterResourceOverrideConfig(in *clusterresourceoverride.ClusterResourceOverrideConfig, out *ClusterResourceOverrideConfig, s conversion.Scope) error { + return autoConvert_clusterresourceoverride_ClusterResourceOverrideConfig_To_v1_ClusterResourceOverrideConfig(in, out, s) +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..c65004232adda --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/zz_generated.deepcopy.go @@ -0,0 +1,51 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceOverrideConfig) DeepCopyInto(out *ClusterResourceOverrideConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceOverrideConfig. +func (in *ClusterResourceOverrideConfig) DeepCopy() *ClusterResourceOverrideConfig { + if in == nil { + return nil + } + out := new(ClusterResourceOverrideConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterResourceOverrideConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation/validation.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation/validation.go new file mode 100644 index 0000000000000..14cdcdd586abf --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation/validation.go @@ -0,0 +1,27 @@ +package validation + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" +) + +func Validate(config *clusterresourceoverride.ClusterResourceOverrideConfig) field.ErrorList { + allErrs := field.ErrorList{} + if config == nil { + return allErrs + } + if config.LimitCPUToMemoryPercent == 0 && config.CPURequestToLimitPercent == 0 && config.MemoryRequestToLimitPercent == 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath(clusterresourceoverride.PluginName), "plugin enabled but no percentages were specified")) + } + if config.LimitCPUToMemoryPercent < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath(clusterresourceoverride.PluginName, "LimitCPUToMemoryPercent"), config.LimitCPUToMemoryPercent, "cannot be negative")) + } + if config.CPURequestToLimitPercent < 0 || config.CPURequestToLimitPercent > 100 { + allErrs = append(allErrs, field.Invalid(field.NewPath(clusterresourceoverride.PluginName, "CPURequestToLimitPercent"), config.CPURequestToLimitPercent, "must be between 0 and 100")) + } + if config.MemoryRequestToLimitPercent < 0 || config.MemoryRequestToLimitPercent > 100 { + allErrs = append(allErrs, field.Invalid(field.NewPath(clusterresourceoverride.PluginName, "MemoryRequestToLimitPercent"), config.MemoryRequestToLimitPercent, "must be between 0 and 100")) + } + return allErrs +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..13736ecd59c18 --- /dev/null +++
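Validate above returns one field.Error per violated rule rather than failing fast. A short usage sketch, assuming the in-tree import paths shown in this hunk:

package main

import (
	"fmt"

	"k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride"
	"k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation"
)

func main() {
	// A config with a negative percentage and an out-of-range request ratio.
	cfg := &clusterresourceoverride.ClusterResourceOverrideConfig{
		LimitCPUToMemoryPercent:     -10,
		CPURequestToLimitPercent:    150,
		MemoryRequestToLimitPercent: 50,
	}

	// Two errors are reported, one per violated rule.
	for _, err := range validation.Validate(cfg) {
		fmt.Println(err)
	}
}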
b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/zz_generated.deepcopy.go @@ -0,0 +1,51 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package clusterresourceoverride + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceOverrideConfig) DeepCopyInto(out *ClusterResourceOverrideConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceOverrideConfig. +func (in *ClusterResourceOverrideConfig) DeepCopy() *ClusterResourceOverrideConfig { + if in == nil { + return nil + } + out := new(ClusterResourceOverrideConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterResourceOverrideConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/doc.go new file mode 100644 index 0000000000000..2eb498613c0ad --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package runonceduration is the internal version of the API. +package runonceduration diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/register.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/register.go new file mode 100644 index 0000000000000..379c2be1ed1a5 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/register.go @@ -0,0 +1,34 @@ +package runonceduration + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupVersion is the group version used to register these objects +var GroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return GroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return GroupVersion.WithResource(resource).GroupResource() +} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +// addKnownTypes adds the list of known types to the scheme.
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &RunOnceDurationConfig{}, + ) + return nil +} + +func (obj *RunOnceDurationConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/types.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/types.go new file mode 100644 index 0000000000000..1a9f5a112c90a --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/types.go @@ -0,0 +1,26 @@ +package runonceduration + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RunOnceDurationConfig is the configuration for the RunOnceDuration plugin. +// It specifies a maximum value for ActiveDeadlineSeconds for a run-once pod. +// The project that contains the pod may specify a different setting. That setting will +// take precedence over the one configured for the plugin here. +type RunOnceDurationConfig struct { + metav1.TypeMeta + + // ActiveDeadlineSecondsOverride is the maximum value to set on containers of run-once pods + // Only a positive value is valid. Absence of a value means that the plugin + // won't make any changes to the pod + ActiveDeadlineSecondsOverride *int64 +} + +// ActiveDeadlineSecondsLimitAnnotation can be set on a project to limit the number of +// seconds that a run-once pod can be active in that project +// TODO: this label needs to change to reflect its function. It's a limit, not an override. +// It is kept this way for compatibility. Only change it in a new version of the API. +const ActiveDeadlineSecondsLimitAnnotation = "openshift.io/active-deadline-seconds-override" diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/conversion.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/conversion.go new file mode 100644 index 0000000000000..31253537849a6 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/conversion.go @@ -0,0 +1,26 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + + internal "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + err := scheme.AddConversionFunc((*RunOnceDurationConfig)(nil), (*internal.RunOnceDurationConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + in := a.(*RunOnceDurationConfig) + out := b.(*internal.RunOnceDurationConfig) + out.ActiveDeadlineSecondsOverride = in.ActiveDeadlineSecondsOverride + return nil + }) + if err != nil { + return err + } + return scheme.AddConversionFunc((*internal.RunOnceDurationConfig)(nil), (*RunOnceDurationConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + in := a.(*internal.RunOnceDurationConfig) + out := b.(*RunOnceDurationConfig) + out.ActiveDeadlineSecondsOverride = in.ActiveDeadlineSecondsOverride + return nil + }) +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/doc.go new file mode 100644 index 0000000000000..f70b886a67a72 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// 
+k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/register.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/register.go new file mode 100644 index 0000000000000..b456123c9fab2 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/register.go @@ -0,0 +1,29 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func (obj *RunOnceDurationConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +var GroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + runonceduration.Install, + + addConversionFuncs, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &RunOnceDurationConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/swagger_doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/swagger_doc.go new file mode 100644 index 0000000000000..1cb7c3cdb319f --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/swagger_doc.go @@ -0,0 +1,15 @@ +package v1 + +// This file contains methods that can be used by the go-restful package to generate Swagger +// documentation for the object types found in 'types.go'. This file is automatically generated +// by hack/update-generated-swagger-descriptions.sh, which should be re-run after a full build of OpenShift. +// ==== DO NOT EDIT THIS FILE MANUALLY ==== + +var map_RunOnceDurationConfig = map[string]string{ + "": "RunOnceDurationConfig is the configuration for the RunOnceDuration plugin. It specifies a maximum value for ActiveDeadlineSeconds for a run-once pod. The project that contains the pod may specify a different setting. That setting will take precedence over the one configured for the plugin here.", + "activeDeadlineSecondsOverride": "ActiveDeadlineSecondsOverride is the maximum value to set on containers of run-once pods. Only a positive value is valid. Absence of a value means that the plugin won't make any changes to the pod. The field name is kept this way for compatibility; only change it in a new version of the API.", +} + +func (RunOnceDurationConfig) SwaggerDoc() map[string]string { + return map_RunOnceDurationConfig +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/types.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/types.go new file mode 100644 index 0000000000000..4cfa3823ba10b --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/types.go @@ -0,0 +1,22 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RunOnceDurationConfig is the configuration for the RunOnceDuration plugin. +// It specifies a maximum value for ActiveDeadlineSeconds for a run-once pod. +// The project that contains the pod may specify a different setting. That setting will +// take precedence over the one configured for the plugin here.
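+//
+// A minimal plugin configuration, with an illustrative one-hour cap
+// (the apiVersion and kind follow the registration in register.go):
+//
+//	apiVersion: autoscaling.openshift.io/v1
+//	kind: RunOnceDurationConfig
+//	activeDeadlineSecondsOverride: 3600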
+type RunOnceDurationConfig struct { + metav1.TypeMeta `json:",inline"` + + // ActiveDeadlineSecondsOverride is the maximum value to set on containers of run-once pods + // Only a positive value is valid. Absence of a value means that the plugin + // won't make any changes to the pod + // TODO: change the external name of this field to reflect that it is a limit, not an override + // It is kept this way for compatibility. Only change it in a new version of the API. + ActiveDeadlineSecondsOverride *int64 `json:"activeDeadlineSecondsOverride,omitempty" description:"maximum value for activeDeadlineSeconds in run-once pods"` +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/zz_generated.conversion.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/zz_generated.conversion.go new file mode 100644 index 0000000000000..3590bb3ccb167 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/zz_generated.conversion.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + runonceduration "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*RunOnceDurationConfig)(nil), (*runonceduration.RunOnceDurationConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RunOnceDurationConfig_To_runonceduration_RunOnceDurationConfig(a.(*RunOnceDurationConfig), b.(*runonceduration.RunOnceDurationConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*runonceduration.RunOnceDurationConfig)(nil), (*RunOnceDurationConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_runonceduration_RunOnceDurationConfig_To_v1_RunOnceDurationConfig(a.(*runonceduration.RunOnceDurationConfig), b.(*RunOnceDurationConfig), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_RunOnceDurationConfig_To_runonceduration_RunOnceDurationConfig(in *RunOnceDurationConfig, out *runonceduration.RunOnceDurationConfig, s conversion.Scope) error { + out.ActiveDeadlineSecondsOverride = (*int64)(unsafe.Pointer(in.ActiveDeadlineSecondsOverride)) + return nil +} + +// Convert_v1_RunOnceDurationConfig_To_runonceduration_RunOnceDurationConfig is an autogenerated conversion function. 
+func Convert_v1_RunOnceDurationConfig_To_runonceduration_RunOnceDurationConfig(in *RunOnceDurationConfig, out *runonceduration.RunOnceDurationConfig, s conversion.Scope) error { + return autoConvert_v1_RunOnceDurationConfig_To_runonceduration_RunOnceDurationConfig(in, out, s) +} + +func autoConvert_runonceduration_RunOnceDurationConfig_To_v1_RunOnceDurationConfig(in *runonceduration.RunOnceDurationConfig, out *RunOnceDurationConfig, s conversion.Scope) error { + out.ActiveDeadlineSecondsOverride = (*int64)(unsafe.Pointer(in.ActiveDeadlineSecondsOverride)) + return nil +} + +// Convert_runonceduration_RunOnceDurationConfig_To_v1_RunOnceDurationConfig is an autogenerated conversion function. +func Convert_runonceduration_RunOnceDurationConfig_To_v1_RunOnceDurationConfig(in *runonceduration.RunOnceDurationConfig, out *RunOnceDurationConfig, s conversion.Scope) error { + return autoConvert_runonceduration_RunOnceDurationConfig_To_v1_RunOnceDurationConfig(in, out, s) +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..e418b1b545345 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/zz_generated.deepcopy.go @@ -0,0 +1,56 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunOnceDurationConfig) DeepCopyInto(out *RunOnceDurationConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ActiveDeadlineSecondsOverride != nil { + in, out := &in.ActiveDeadlineSecondsOverride, &out.ActiveDeadlineSecondsOverride + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunOnceDurationConfig. +func (in *RunOnceDurationConfig) DeepCopy() *RunOnceDurationConfig { + if in == nil { + return nil + } + out := new(RunOnceDurationConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RunOnceDurationConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation.go new file mode 100644 index 0000000000000..7ddcad869845a --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation.go @@ -0,0 +1,18 @@ +package validation + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +// ValidateRunOnceDurationConfig validates the RunOnceDuration plugin configuration +func ValidateRunOnceDurationConfig(config *runonceduration.RunOnceDurationConfig) field.ErrorList { + allErrs := field.ErrorList{} + if config == nil || config.ActiveDeadlineSecondsOverride == nil { + return allErrs + } + if *config.ActiveDeadlineSecondsOverride <= 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("activeDeadlineSecondsOverride"), config.ActiveDeadlineSecondsOverride, "must be greater than 0")) + } + return allErrs +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation_test.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation_test.go new file mode 100644 index 0000000000000..19f6f6d70544b --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation_test.go @@ -0,0 +1,29 @@ +package validation + +import ( + "testing" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func TestRunOnceDurationConfigValidation(t *testing.T) { + // Check invalid duration returns an error + var invalidSecs int64 = -1 + invalidConfig := &runonceduration.RunOnceDurationConfig{ + ActiveDeadlineSecondsOverride: &invalidSecs, + } + errs := ValidateRunOnceDurationConfig(invalidConfig) + if len(errs) == 0 { + t.Errorf("Did not get expected error on invalid config") + } + + // Check that valid duration returns no error + var validSecs int64 = 5 + validConfig := &runonceduration.RunOnceDurationConfig{ + ActiveDeadlineSecondsOverride: &validSecs, + } + errs = ValidateRunOnceDurationConfig(validConfig) + if len(errs) > 0 { + t.Errorf("Unexpected error on valid config") + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..7a2f070a725af --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/zz_generated.deepcopy.go @@ -0,0 +1,56 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package runonceduration + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunOnceDurationConfig) DeepCopyInto(out *RunOnceDurationConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ActiveDeadlineSecondsOverride != nil { + in, out := &in.ActiveDeadlineSecondsOverride, &out.ActiveDeadlineSecondsOverride + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunOnceDurationConfig. +func (in *RunOnceDurationConfig) DeepCopy() *RunOnceDurationConfig { + if in == nil { + return nil + } + out := new(RunOnceDurationConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RunOnceDurationConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission.go b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission.go new file mode 100644 index 0000000000000..6aed487fdef13 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission.go @@ -0,0 +1,348 @@ +package clusterresourceoverride + +import ( + "context" + "fmt" + "io" + "strings" + + "github.com/openshift/library-go/pkg/config/helpers" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1" + + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + coreapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/plugin/pkg/admission/limitranger" + + api "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation" +) + +const ( + clusterResourceOverrideAnnotation = "autoscaling.openshift.io/cluster-resource-override-enabled" + cpuBaseScaleFactor = 1000.0 / (1024.0 * 1024.0 * 1024.0) // 1000 milliCores per 1GiB +) + +var ( + cpuFloor = resource.MustParse("1m") + memFloor = resource.MustParse("1Mi") +) + +func Register(plugins *admission.Plugins) { + plugins.Register(api.PluginName, + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := ReadConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", api.PluginName) + return nil, nil + } + return newClusterResourceOverride(pluginConfig) + }) +} + +type internalConfig struct { + limitCPUToMemoryRatio float64 + cpuRequestToLimitRatio float64 + memoryRequestToLimitRatio float64 +} +type clusterResourceOverridePlugin struct { + *admission.Handler + config *internalConfig + nsLister corev1listers.NamespaceLister + LimitRanger *limitranger.LimitRanger + limitRangesLister corev1listers.LimitRangeLister +} + +var _ = initializer.WantsExternalKubeInformerFactory(&clusterResourceOverridePlugin{}) +var _ = initializer.WantsExternalKubeClientSet(&clusterResourceOverridePlugin{}) +var _ = 
admission.MutationInterface(&clusterResourceOverridePlugin{}) +var _ = admission.ValidationInterface(&clusterResourceOverridePlugin{}) + +// newClusterResourceOverride returns an admission controller that overrides +// container resource requests/limits according to the configured ratios +func newClusterResourceOverride(config *api.ClusterResourceOverrideConfig) (admission.Interface, error) { + klog.V(2).Infof("%s admission controller loaded with config: %v", api.PluginName, config) + var internal *internalConfig + if config != nil { + internal = &internalConfig{ + limitCPUToMemoryRatio: float64(config.LimitCPUToMemoryPercent) / 100, + cpuRequestToLimitRatio: float64(config.CPURequestToLimitPercent) / 100, + memoryRequestToLimitRatio: float64(config.MemoryRequestToLimitPercent) / 100, + } + } + + limitRanger, err := limitranger.NewLimitRanger(nil) + if err != nil { + return nil, err + } + + return &clusterResourceOverridePlugin{ + Handler: admission.NewHandler(admission.Create), + config: internal, + LimitRanger: limitRanger, + }, nil +} + +func (d *clusterResourceOverridePlugin) SetExternalKubeClientSet(c kubernetes.Interface) { + d.LimitRanger.SetExternalKubeClientSet(c) +} + +func (d *clusterResourceOverridePlugin) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + d.LimitRanger.SetExternalKubeInformerFactory(kubeInformers) + d.limitRangesLister = kubeInformers.Core().V1().LimitRanges().Lister() + d.nsLister = kubeInformers.Core().V1().Namespaces().Lister() +} + +func ReadConfig(configFile io.Reader) (*api.ClusterResourceOverrideConfig, error) { + obj, err := helpers.ReadYAMLToInternal(configFile, api.Install, v1.Install) + if err != nil { + klog.V(5).Infof("%s error reading config: %v", api.PluginName, err) + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*api.ClusterResourceOverrideConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + klog.V(5).Infof("%s config is: %v", api.PluginName, config) + if errs := validation.Validate(config); len(errs) > 0 { + return nil, errs.ToAggregate() + } + + return config, nil +} + +func (a *clusterResourceOverridePlugin) ValidateInitialization() error { + if a.nsLister == nil { + return fmt.Errorf("%s did not get a namespace lister", api.PluginName) + } + return a.LimitRanger.ValidateInitialization() +} + +// It is a real shame that these have to be special-cased.
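+// Namespaces named exactly "openshift", "kubernetes", or "kube", or carrying
+// one of those prefixes, belong to the platform and are always exempt from
+// the override regardless of the namespace annotation.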
+var ( + forbiddenNames = []string{"openshift", "kubernetes", "kube"} + forbiddenPrefixes = []string{"openshift-", "kubernetes-", "kube-"} +) + +func isExemptedNamespace(name string) bool { + for _, s := range forbiddenNames { + if name == s { + return true + } + } + for _, s := range forbiddenPrefixes { + if strings.HasPrefix(name, s) { + return true + } + } + return false +} + +func (a *clusterResourceOverridePlugin) Admit(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + return a.admit(ctx, attr, true, o) +} + +func (a *clusterResourceOverridePlugin) Validate(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + return a.admit(ctx, attr, false, o) +} + +// TODO this will need to update when we have pod requests/limits +func (a *clusterResourceOverridePlugin) admit(ctx context.Context, attr admission.Attributes, mutationAllowed bool, o admission.ObjectInterfaces) error { + klog.V(6).Infof("%s admission controller is invoked", api.PluginName) + if a.config == nil || attr.GetResource().GroupResource() != coreapi.Resource("pods") || attr.GetSubresource() != "" { + return nil // not applicable + } + pod, ok := attr.GetObject().(*coreapi.Pod) + if !ok { + return admission.NewForbidden(attr, fmt.Errorf("unexpected object: %#v", attr.GetObject())) + } + klog.V(5).Infof("%s is looking at creating pod %s in project %s", api.PluginName, pod.Name, attr.GetNamespace()) + + // allow annotations on project to override + ns, err := a.nsLister.Get(attr.GetNamespace()) + if err != nil { + klog.Warningf("%s got an error retrieving namespace: %v", api.PluginName, err) + return admission.NewForbidden(attr, err) // this should not happen though + } + + projectEnabledPlugin, exists := ns.Annotations[clusterResourceOverrideAnnotation] + if exists && projectEnabledPlugin != "true" { + klog.V(5).Infof("%s is disabled for project %s", api.PluginName, attr.GetNamespace()) + return nil // disabled for this project, do nothing + } + + if isExemptedNamespace(ns.Name) { + klog.V(5).Infof("%s is skipping exempted project %s", api.PluginName, attr.GetNamespace()) + return nil // project is exempted, do nothing + } + + namespaceLimits := []*corev1.LimitRange{} + + if a.limitRangesLister != nil { + limits, err := a.limitRangesLister.LimitRanges(attr.GetNamespace()).List(labels.Everything()) + if err != nil { + return err + } + namespaceLimits = limits + } + + // Don't mutate resource requirements below the namespace + // limit minimums. + nsCPUFloor := minResourceLimits(namespaceLimits, corev1.ResourceCPU) + nsMemFloor := minResourceLimits(namespaceLimits, corev1.ResourceMemory) + + // Reuse LimitRanger logic to apply limit/req defaults from the project. Ignore validation + // errors, assume that LimitRanger will run after this plugin to validate. 
+ klog.V(5).Infof("%s: initial pod limits are: %#v", api.PluginName, pod.Spec) + if err := a.LimitRanger.Admit(ctx, attr, o); err != nil { + klog.V(5).Infof("%s: error from LimitRanger: %#v", api.PluginName, err) + } + klog.V(5).Infof("%s: pod limits after LimitRanger: %#v", api.PluginName, pod.Spec) + for i := range pod.Spec.InitContainers { + if err := updateContainerResources(a.config, &pod.Spec.InitContainers[i], nsCPUFloor, nsMemFloor, mutationAllowed); err != nil { + return admission.NewForbidden(attr, fmt.Errorf("spec.initContainers[%d].%v", i, err)) + } + } + for i := range pod.Spec.Containers { + if err := updateContainerResources(a.config, &pod.Spec.Containers[i], nsCPUFloor, nsMemFloor, mutationAllowed); err != nil { + return admission.NewForbidden(attr, fmt.Errorf("spec.containers[%d].%v", i, err)) + } + } + klog.V(5).Infof("%s: pod limits after overrides are: %#v", api.PluginName, pod.Spec) + return nil +} + +func updateContainerResources(config *internalConfig, container *coreapi.Container, nsCPUFloor, nsMemFloor *resource.Quantity, mutationAllowed bool) error { + resources := container.Resources + memLimit, memFound := resources.Limits[coreapi.ResourceMemory] + if memFound && config.memoryRequestToLimitRatio != 0 { + // memory is measured in whole bytes. + // the plugin rounds down to the nearest MiB rather than bytes to improve ease of use for end-users. + amount := memLimit.Value() * int64(config.memoryRequestToLimitRatio*100) / 100 + // TODO: move into resource.Quantity + var mod int64 + switch memLimit.Format { + case resource.BinarySI: + mod = 1024 * 1024 + default: + mod = 1000 * 1000 + } + if rem := amount % mod; rem != 0 { + amount = amount - rem + } + q := resource.NewQuantity(int64(amount), memLimit.Format) + if memFloor.Cmp(*q) > 0 { + clone := memFloor.DeepCopy() + q = &clone + } + if nsMemFloor != nil && q.Cmp(*nsMemFloor) < 0 { + klog.V(5).Infof("%s: %s pod limit %q below namespace limit; setting limit to %q", api.PluginName, corev1.ResourceMemory, q.String(), nsMemFloor.String()) + clone := nsMemFloor.DeepCopy() + q = &clone + } + if err := applyQuantity(resources.Requests, corev1.ResourceMemory, *q, mutationAllowed); err != nil { + return fmt.Errorf("resources.requests.%s %v", corev1.ResourceMemory, err) + } + } + if memFound && config.limitCPUToMemoryRatio != 0 { + amount := float64(memLimit.Value()) * config.limitCPUToMemoryRatio * cpuBaseScaleFactor + q := resource.NewMilliQuantity(int64(amount), resource.DecimalSI) + if cpuFloor.Cmp(*q) > 0 { + clone := cpuFloor.DeepCopy() + q = &clone + } + if nsCPUFloor != nil && q.Cmp(*nsCPUFloor) < 0 { + klog.V(5).Infof("%s: %s pod limit %q below namespace limit; setting limit to %q", api.PluginName, corev1.ResourceCPU, q.String(), nsCPUFloor.String()) + clone := nsCPUFloor.DeepCopy() + q = &clone + } + if err := applyQuantity(resources.Limits, corev1.ResourceCPU, *q, mutationAllowed); err != nil { + return fmt.Errorf("resources.limits.%s %v", corev1.ResourceCPU, err) + } + } + + cpuLimit, cpuFound := resources.Limits[coreapi.ResourceCPU] + if cpuFound && config.cpuRequestToLimitRatio != 0 { + amount := float64(cpuLimit.MilliValue()) * config.cpuRequestToLimitRatio + q := resource.NewMilliQuantity(int64(amount), cpuLimit.Format) + if cpuFloor.Cmp(*q) > 0 { + clone := cpuFloor.DeepCopy() + q = &clone + } + if nsCPUFloor != nil && q.Cmp(*nsCPUFloor) < 0 { + klog.V(5).Infof("%s: %s pod limit %q below namespace limit; setting limit to %q", api.PluginName, corev1.ResourceCPU, q.String(), nsCPUFloor.String()) + clone := 
nsCPUFloor.DeepCopy() + q = &clone + } + if err := applyQuantity(resources.Requests, corev1.ResourceCPU, *q, mutationAllowed); err != nil { + return fmt.Errorf("resources.requests.%s %v", corev1.ResourceCPU, err) + } + } + + return nil +} + +func applyQuantity(l coreapi.ResourceList, r corev1.ResourceName, v resource.Quantity, mutationAllowed bool) error { + if mutationAllowed { + l[coreapi.ResourceName(r)] = v + return nil + } + + if oldValue, ok := l[coreapi.ResourceName(r)]; !ok { + return fmt.Errorf("mutated, expected: %v, now absent", v) + } else if oldValue.Cmp(v) != 0 { + return fmt.Errorf("mutated, expected: %v, got %v", v, oldValue) + } + + return nil +} + +// minResourceLimits finds the Min limit for resourceName. Nil is +// returned if limitRanges is empty or limits contains no resourceName +// limits. +func minResourceLimits(limitRanges []*corev1.LimitRange, resourceName corev1.ResourceName) *resource.Quantity { + limits := []*resource.Quantity{} + + for _, limitRange := range limitRanges { + for _, limit := range limitRange.Spec.Limits { + if limit.Type == corev1.LimitTypeContainer { + if limit, found := limit.Min[resourceName]; found { + clone := limit.DeepCopy() + limits = append(limits, &clone) + } + } + } + } + + if len(limits) == 0 { + return nil + } + + return minQuantity(limits) +} + +func minQuantity(quantities []*resource.Quantity) *resource.Quantity { + min := quantities[0].DeepCopy() + + for i := range quantities { + if quantities[i].Cmp(min) < 0 { + min = quantities[i].DeepCopy() + } + } + + return &min +} diff --git a/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission_test.go b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission_test.go new file mode 100644 index 0000000000000..d1c54bb140aae --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission_test.go @@ -0,0 +1,507 @@ +package clusterresourceoverride + +import ( + "bytes" + "context" + "fmt" + "io" + "reflect" + "testing" + + "github.com/openshift/library-go/pkg/config/helpers" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" + clusterresourceoverridev1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation" + kapi "k8s.io/kubernetes/pkg/apis/core" +) + +const ( + yamlConfig = ` +apiVersion: autoscaling.openshift.io/v1 +kind: ClusterResourceOverrideConfig +limitCPUToMemoryPercent: 100 +cpuRequestToLimitPercent: 10 +memoryRequestToLimitPercent: 25 +` + invalidConfig = ` +apiVersion: autoscaling.openshift.io/v1 +kind: ClusterResourceOverrideConfig +cpuRequestToLimitPercent: 200 +` + invalidConfig2 = ` +apiVersion: autoscaling.openshift.io/v1 +kind: ClusterResourceOverrideConfig +` +) + +var ( + deserializedYamlConfig = &clusterresourceoverride.ClusterResourceOverrideConfig{ + LimitCPUToMemoryPercent: 100, + CPURequestToLimitPercent: 10, + MemoryRequestToLimitPercent: 25, + } +) + +func TestConfigReader(t *testing.T) { + initialConfig := testConfig(10, 20, 30) 
+ serializedConfig, serializationErr := helpers.WriteYAML(initialConfig, clusterresourceoverridev1.Install) + if serializationErr != nil { + t.Fatalf("WriteYAML: config serialize failed: %v", serializationErr) + } + + tests := []struct { + name string + config io.Reader + expectErr bool + expectNil bool + expectInvalid bool + expectedConfig *clusterresourceoverride.ClusterResourceOverrideConfig + }{ + { + name: "process nil config", + config: nil, + expectNil: true, + }, { + name: "deserialize initialConfig yaml", + config: bytes.NewReader(serializedConfig), + expectedConfig: initialConfig, + }, { + name: "completely broken config", + config: bytes.NewReader([]byte("asdfasdfasdF")), + expectErr: true, + }, { + name: "deserialize yamlConfig", + config: bytes.NewReader([]byte(yamlConfig)), + expectedConfig: deserializedYamlConfig, + }, { + name: "choke on out-of-bounds ratio", + config: bytes.NewReader([]byte(invalidConfig)), + expectInvalid: true, + expectErr: true, + }, { + name: "complain about no settings", + config: bytes.NewReader([]byte(invalidConfig2)), + expectInvalid: true, + expectErr: true, + }, + } + for _, test := range tests { + config, err := ReadConfig(test.config) + if test.expectErr && err == nil { + t.Errorf("%s: expected error", test.name) + } else if !test.expectErr && err != nil { + t.Errorf("%s: expected no error, saw %v", test.name, err) + } + if err == nil { + if test.expectNil && config != nil { + t.Errorf("%s: expected nil config, but saw: %v", test.name, config) + } else if !test.expectNil && config == nil { + t.Errorf("%s: expected config, but got nil", test.name) + } + } + if config != nil { + if test.expectedConfig != nil && *test.expectedConfig != *config { + t.Errorf("%s: expected %v from reader, but got %v", test.name, test.expectedConfig, config) + } + if err := validation.Validate(config); test.expectInvalid && len(err) == 0 { + t.Errorf("%s: expected validation to fail, but it passed", test.name) + } else if !test.expectInvalid && len(err) > 0 { + t.Errorf("%s: expected validation to pass, but it failed with %v", test.name, err) + } + } + } +} + +func TestLimitRequestAdmission(t *testing.T) { + tests := []struct { + name string + config *clusterresourceoverride.ClusterResourceOverrideConfig + pod *kapi.Pod + expectedMemRequest resource.Quantity + expectedCpuLimit resource.Quantity + expectedCpuRequest resource.Quantity + namespace *corev1.Namespace + namespaceLimits []*corev1.LimitRange + }{ + { + name: "ignore pods that have no memory limit specified", + config: testConfig(100, 50, 50), + pod: testBestEffortPod(), + expectedMemRequest: resource.MustParse("0"), + expectedCpuLimit: resource.MustParse("0"), + expectedCpuRequest: resource.MustParse("0"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, ignore pods that have no memory limit specified", + config: testConfig(100, 50, 50), + pod: testBestEffortPod(), + expectedMemRequest: resource.MustParse("0"), + expectedCpuLimit: resource.MustParse("0"), + expectedCpuRequest: resource.MustParse("0"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("567m"), + fakeMinCPULimitRange("678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "test floor for memory and cpu", + config: testConfig(100, 50, 50), + pod: testPod("1Mi", "0", "0", "0"), + expectedMemRequest: resource.MustParse("1Mi"), + expectedCpuLimit: resource.MustParse("1m"), + expectedCpuRequest: resource.MustParse("1m"), + namespace: 
fakeNamespace(true), + }, + { + name: "with namespace limits, test floor for memory and cpu", + config: testConfig(100, 50, 50), + pod: testPod("1Mi", "0", "0", "0"), + expectedMemRequest: resource.MustParse("456Gi"), + expectedCpuLimit: resource.MustParse("567m"), + expectedCpuRequest: resource.MustParse("567m"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("567m"), + fakeMinCPULimitRange("678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "nil config", + config: nil, + pod: testPod("1", "1", "1", "1"), + expectedMemRequest: resource.MustParse("1"), + expectedCpuLimit: resource.MustParse("1"), + expectedCpuRequest: resource.MustParse("1"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, nil config", + config: nil, + pod: testPod("1", "1", "1", "1"), + expectedMemRequest: resource.MustParse("1"), + expectedCpuLimit: resource.MustParse("1"), + expectedCpuRequest: resource.MustParse("1"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("567m"), + fakeMinCPULimitRange("678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "all values are adjusted", + config: testConfig(100, 50, 50), + pod: testPod("1Gi", "0", "2000m", "0"), + expectedMemRequest: resource.MustParse("512Mi"), + expectedCpuLimit: resource.MustParse("1"), + expectedCpuRequest: resource.MustParse("500m"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, all values are adjusted to floor of namespace limits", + config: testConfig(100, 50, 50), + pod: testPod("1Gi", "0", "2000m", "0"), + expectedMemRequest: resource.MustParse("456Gi"), + expectedCpuLimit: resource.MustParse("10567m"), + expectedCpuRequest: resource.MustParse("10567m"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("10567m"), + fakeMinCPULimitRange("20678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "just requests are adjusted", + config: testConfig(0, 50, 50), + pod: testPod("10Mi", "0", "50m", "0"), + expectedMemRequest: resource.MustParse("5Mi"), + expectedCpuLimit: resource.MustParse("50m"), + expectedCpuRequest: resource.MustParse("25m"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, all requests are adjusted to floor of namespace limits", + config: testConfig(0, 50, 50), + pod: testPod("10Mi", "0", "50m", "0"), + expectedMemRequest: resource.MustParse("456Gi"), + expectedCpuLimit: resource.MustParse("50m"), + expectedCpuRequest: resource.MustParse("10567m"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("10567m"), + fakeMinCPULimitRange("20678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "project annotation disables overrides", + config: testConfig(0, 50, 50), + pod: testPod("10Mi", "0", "50m", "0"), + expectedMemRequest: resource.MustParse("0"), + expectedCpuLimit: resource.MustParse("50m"), + expectedCpuRequest: resource.MustParse("0"), + namespace: fakeNamespace(false), + }, + { + name: "with namespace limits, project annotation disables overrides", + config: testConfig(0, 50, 50), + pod: testPod("10Mi", "0", "50m", "0"), + expectedMemRequest: resource.MustParse("0"), + expectedCpuLimit: resource.MustParse("50m"), + expectedCpuRequest: resource.MustParse("0"), + namespace: 
fakeNamespace(false), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("10567m"), + fakeMinCPULimitRange("20678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "large values don't overflow", + config: testConfig(100, 50, 50), + pod: testPod("1Ti", "0", "0", "0"), + expectedMemRequest: resource.MustParse("512Gi"), + expectedCpuLimit: resource.MustParse("1024"), + expectedCpuRequest: resource.MustParse("512"), + namespace: fakeNamespace(true), + }, + { + name: "little values mess things up", + config: testConfig(500, 10, 10), + pod: testPod("1.024Mi", "0", "0", "0"), + expectedMemRequest: resource.MustParse("1Mi"), + expectedCpuLimit: resource.MustParse("5m"), + expectedCpuRequest: resource.MustParse("1m"), + namespace: fakeNamespace(true), + }, + { + name: "test fractional memory requests round up", + config: testConfig(500, 10, 60), + pod: testPod("512Mi", "0", "0", "0"), + expectedMemRequest: resource.MustParse("307Mi"), + expectedCpuLimit: resource.MustParse("2.5"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: fakeNamespace(true), + }, + { + name: "test only containers types are considered with namespace limits", + config: testConfig(100, 50, 50), + pod: testPod("1Gi", "0", "2000m", "0"), + expectedMemRequest: resource.MustParse("512Mi"), + expectedCpuLimit: resource.MustParse("1"), + expectedCpuRequest: resource.MustParse("500m"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinStorageLimitRange("1567Mi"), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + c, err := newClusterResourceOverride(test.config) + if err != nil { + t.Fatalf("%s: config de/serialize failed: %v", test.name, err) + } + // Override LimitRanger with limits from test case + c.(*clusterResourceOverridePlugin).limitRangesLister = fakeLimitRangeLister{ + namespaceLister: fakeLimitRangeNamespaceLister{ + limits: test.namespaceLimits, + }, + } + c.(*clusterResourceOverridePlugin).nsLister = fakeNamespaceLister(test.namespace) + attrs := admission.NewAttributesRecord(test.pod, nil, schema.GroupVersionKind{}, test.namespace.Name, "name", kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, fakeUser()) + clone := test.pod.DeepCopy() + if err = c.(admission.MutationInterface).Admit(context.TODO(), attrs, nil); err != nil { + t.Fatalf("%s: admission controller returned error: %v", test.name, err) + } + if err = c.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil); err != nil { + t.Fatalf("%s: admission controller returned error: %v", test.name, err) + } + + if !reflect.DeepEqual(test.pod, clone) { + attrs := admission.NewAttributesRecord(clone, nil, schema.GroupVersionKind{}, test.namespace.Name, "name", kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, fakeUser()) + if err = c.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil); err == nil { + t.Fatalf("%s: admission controller returned no error, but should", test.name) + } + } + + resources := test.pod.Spec.InitContainers[0].Resources // only test one container + if actual := resources.Requests[kapi.ResourceMemory]; test.expectedMemRequest.Cmp(actual) != 0 { + t.Errorf("%s: memory requests do not match; %v should be %v", test.name, actual, test.expectedMemRequest) + } + if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 { + t.Errorf("%s: cpu requests do not match; %v should be %v", 
test.name, actual, test.expectedCpuRequest) + } + if actual := resources.Limits[kapi.ResourceCPU]; test.expectedCpuLimit.Cmp(actual) != 0 { + t.Errorf("%s: cpu limits do not match; %v should be %v", test.name, actual, test.expectedCpuLimit) + } + + resources = test.pod.Spec.Containers[0].Resources // only test one container + if actual := resources.Requests[kapi.ResourceMemory]; test.expectedMemRequest.Cmp(actual) != 0 { + t.Errorf("%s: memory requests do not match; %v should be %v", test.name, actual, test.expectedMemRequest) + } + if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 { + t.Errorf("%s: cpu requests do not match; %v should be %v", test.name, actual, test.expectedCpuRequest) + } + if actual := resources.Limits[kapi.ResourceCPU]; test.expectedCpuLimit.Cmp(actual) != 0 { + t.Errorf("%s: cpu limits do not match; %v should be %v", test.name, actual, test.expectedCpuLimit) + } + }) + } +} + +func testBestEffortPod() *kapi.Pod { + return &kapi.Pod{ + Spec: kapi.PodSpec{ + InitContainers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{}, + }, + }, + Containers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{}, + }, + }, + }, + } +} + +func testPod(memLimit string, memRequest string, cpuLimit string, cpuRequest string) *kapi.Pod { + return &kapi.Pod{ + Spec: kapi.PodSpec{ + InitContainers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{ + Limits: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuLimit), + kapi.ResourceMemory: resource.MustParse(memLimit), + }, + Requests: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuRequest), + kapi.ResourceMemory: resource.MustParse(memRequest), + }, + }, + }, + }, + Containers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{ + Limits: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuLimit), + kapi.ResourceMemory: resource.MustParse(memLimit), + }, + Requests: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuRequest), + kapi.ResourceMemory: resource.MustParse(memRequest), + }, + }, + }, + }, + }, + } +} + +func fakeUser() user.Info { + return &user.DefaultInfo{ + Name: "testuser", + } +} + +var nsIndex = 0 + +func fakeNamespace(pluginEnabled bool) *corev1.Namespace { + nsIndex++ + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("fakeNS%d", nsIndex), + Annotations: map[string]string{}, + }, + } + if !pluginEnabled { + ns.Annotations[clusterResourceOverrideAnnotation] = "false" + } + return ns +} + +func fakeNamespaceLister(ns *corev1.Namespace) corev1listers.NamespaceLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + indexer.Add(ns) + return corev1listers.NewNamespaceLister(indexer) +} + +func testConfig(lc2mr int64, cr2lr int64, mr2lr int64) *clusterresourceoverride.ClusterResourceOverrideConfig { + return &clusterresourceoverride.ClusterResourceOverrideConfig{ + LimitCPUToMemoryPercent: lc2mr, + CPURequestToLimitPercent: cr2lr, + MemoryRequestToLimitPercent: mr2lr, + } +} + +func fakeMinLimitRange(limitType corev1.LimitType, resourceType corev1.ResourceName, limits ...string) *corev1.LimitRange { + r := &corev1.LimitRange{} + + for i := range limits { + rl := corev1.ResourceList{} + rl[resourceType] = resource.MustParse(limits[i]) + r.Spec.Limits = append(r.Spec.Limits, + corev1.LimitRangeItem{ + Type: limitType, + Min: rl, + }, + ) + } + + return r +} + +func fakeMinMemoryLimitRange(limits ...string) *corev1.LimitRange { + return 
fakeMinLimitRange(corev1.LimitTypeContainer, corev1.ResourceMemory, limits...) +} + +func fakeMinCPULimitRange(limits ...string) *corev1.LimitRange { + return fakeMinLimitRange(corev1.LimitTypeContainer, corev1.ResourceCPU, limits...) +} + +func fakeMinStorageLimitRange(limits ...string) *corev1.LimitRange { + return fakeMinLimitRange(corev1.LimitTypePersistentVolumeClaim, corev1.ResourceStorage, limits...) +} + +type fakeLimitRangeLister struct { + corev1listers.LimitRangeLister + namespaceLister fakeLimitRangeNamespaceLister +} + +type fakeLimitRangeNamespaceLister struct { + corev1listers.LimitRangeNamespaceLister + limits []*corev1.LimitRange +} + +func (f fakeLimitRangeLister) LimitRanges(namespace string) corev1listers.LimitRangeNamespaceLister { + return f.namespaceLister +} + +func (f fakeLimitRangeNamespaceLister) List(selector labels.Selector) ([]*corev1.LimitRange, error) { + return f.limits, nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/doc.go b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/doc.go new file mode 100644 index 0000000000000..aaf2176af054a --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/doc.go @@ -0,0 +1,8 @@ +package clusterresourceoverride + +// The ClusterResourceOverride plugin is only active when admission control config is supplied for it. +// The plugin allows administrators to override user-provided container request/limit values +// in order to control overcommit and optionally pin CPU to memory. +// The plugin's actions can be disabled per-project with the project annotation +// autoscaling.openshift.io/cluster-resource-override-enabled="false", so cluster admins +// can exempt infrastructure projects and such from the overrides. 
diff --git a/openshift-kube-apiserver/admission/autoscaling/managednode/admission.go b/openshift-kube-apiserver/admission/autoscaling/managednode/admission.go new file mode 100644 index 0000000000000..d89c6423a05eb --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managednode/admission.go @@ -0,0 +1,136 @@ +package managednode + +import ( + "context" + "fmt" + "io" + "strings" + + configv1 "github.com/openshift/api/config/v1" + configv1informer "github.com/openshift/client-go/config/informers/externalversions/config/v1" + configv1listers "github.com/openshift/client-go/config/listers/config/v1" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/kubernetes/pkg/kubelet/managed" + + corev1 "k8s.io/api/core/v1" + coreapi "k8s.io/kubernetes/pkg/apis/core" + + "k8s.io/client-go/kubernetes" +) + +const ( + PluginName = "autoscaling.openshift.io/ManagedNode" + // infraClusterName contains the name of the cluster infrastructure resource + infraClusterName = "cluster" +) + +var _ = initializer.WantsExternalKubeClientSet(&managedNodeValidate{}) +var _ = admission.ValidationInterface(&managedNodeValidate{}) +var _ = WantsInfraInformer(&managedNodeValidate{}) + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, + func(_ io.Reader) (admission.Interface, error) { + return &managedNodeValidate{ + Handler: admission.NewHandler(admission.Create, admission.Update), + }, nil + }) +} + +type managedNodeValidate struct { + *admission.Handler + client kubernetes.Interface + infraConfigLister configv1listers.InfrastructureLister + infraConfigListSynced func() bool +} + +// SetExternalKubeClientSet implements the WantsExternalKubeClientSet interface. +func (a *managedNodeValidate) SetExternalKubeClientSet(client kubernetes.Interface) { + a.client = client +} + +func (a *managedNodeValidate) SetInfraInformer(informer configv1informer.InfrastructureInformer) { + a.infraConfigLister = informer.Lister() + a.infraConfigListSynced = informer.Informer().HasSynced +} + +func (a *managedNodeValidate) ValidateInitialization() error { + if a.client == nil { + return fmt.Errorf("%s plugin needs a kubernetes client", PluginName) + } + if a.infraConfigLister == nil { + return fmt.Errorf("%s did not get a config infrastructure lister", PluginName) + } + if a.infraConfigListSynced == nil { + return fmt.Errorf("%s plugin needs a config infrastructure lister synced", PluginName) + } + return nil +} + +func (a *managedNodeValidate) Validate(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) (err error) { + if attr.GetResource().GroupResource() != corev1.Resource("nodes") || attr.GetSubresource() != "" { + return nil + } + + node, ok := attr.GetObject().(*coreapi.Node) + if !ok { + return admission.NewForbidden(attr, fmt.Errorf("unexpected object: %#v", attr.GetObject())) + } + + // infraConfigListSynced is expected to be thread-safe since the underlying call is to the standard + // informer HasSynced() function which is thread-safe.
+ if !a.infraConfigListSynced() { + return admission.NewForbidden(attr, fmt.Errorf("%s infra config cache not synchronized", PluginName)) + } + + clusterInfra, err := a.infraConfigLister.Get(infraClusterName) + if err != nil { + return admission.NewForbidden(attr, err) // can happen due to informer latency + } + + // Check if we are in CPU Partitioning mode for AllNodes + allErrs := validateClusterCPUPartitioning(clusterInfra.Status, node) + if len(allErrs) == 0 { + return nil + } + return errors.NewInvalid(attr.GetKind().GroupKind(), node.Name, allErrs) +} + +// validateClusterCPUPartitioning makes sure that we only check nodes when CPU partitioning is turned on. +// We also need to account for Single Node upgrades: during that initial upgrade, NTO will update this field +// to make it authoritative from that point on. A rollback will revert a Single Node cluster back to its normal cycle. +// Other installations will have this field set at install time, and it cannot be turned off. +// +// If CPUPartitioning == AllNodes (and not the empty value), check the nodes. +func validateClusterCPUPartitioning(infraStatus configv1.InfrastructureStatus, node *coreapi.Node) field.ErrorList { + errorMessage := "node does not contain resource information, this is required for clusters with workload partitioning enabled" + var allErrs field.ErrorList + + if infraStatus.CPUPartitioning == configv1.CPUPartitioningAllNodes { + if !containsCPUResource(node.Status.Capacity) { + allErrs = append(allErrs, getNodeInvalidWorkloadResourceError("capacity", errorMessage)) + } + if !containsCPUResource(node.Status.Allocatable) { + allErrs = append(allErrs, getNodeInvalidWorkloadResourceError("allocatable", errorMessage)) + } + } + + return allErrs +} + +func containsCPUResource(resources coreapi.ResourceList) bool { + for k := range resources { + if strings.Contains(k.String(), managed.WorkloadsCapacitySuffix) { + return true + } + } + return false +} + +func getNodeInvalidWorkloadResourceError(resourcePool, message string) *field.Error { + return field.Required(field.NewPath("status", resourcePool, managed.WorkloadsCapacitySuffix), message) +} diff --git a/openshift-kube-apiserver/admission/autoscaling/managednode/admission_test.go b/openshift-kube-apiserver/admission/autoscaling/managednode/admission_test.go new file mode 100644 index 0000000000000..8a1c0157a5d5d --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managednode/admission_test.go @@ -0,0 +1,128 @@ +package managednode + +import ( + "context" + "fmt" + "testing" + + configv1 "github.com/openshift/api/config/v1" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + + configv1listers "github.com/openshift/client-go/config/listers/config/v1" + + corev1 "k8s.io/api/core/v1" + kapi "k8s.io/kubernetes/pkg/apis/core" +) + +const ( + managedCapacityLabel = "management.workload.openshift.io/cores" +) + +func TestAdmit(t *testing.T) { + tests := []struct { + name string + node *corev1.Node + infra *configv1.Infrastructure + expectedError error + }{ + { + name: "should succeed when CPU partitioning is set to AllNodes", + node: testNodeWithManagementResource(true), + infra: testClusterInfra(configv1.CPUPartitioningAllNodes), + }, + { + name: "should succeed when CPU partitioning is set to None", + node: 
testNodeWithManagementResource(true), + infra: testClusterInfra(configv1.CPUPartitioningNone), + }, + { + name: "should fail when nodes don't have capacity", + node: testNodeWithManagementResource(false), + infra: testClusterInfra(configv1.CPUPartitioningAllNodes), + expectedError: fmt.Errorf("node does not contain resource information, this is required for clusters with workload partitioning enabled"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + m, err := getMockNode(test.infra) + if err != nil { + t.Fatalf("%s: failed to get mock managedNodeValidate: %v", test.name, err) + } + + attrs := admission.NewAttributesRecord( + test.node, nil, schema.GroupVersionKind{}, + test.node.Namespace, test.node.Name, kapi.Resource("nodes").WithVersion("version"), "", + admission.Create, nil, false, fakeUser()) + err = m.Validate(context.TODO(), attrs, nil) + + if err != nil && test.expectedError == nil { + t.Fatalf("%s: admission controller returned unexpected error: %v", test.name, err) + } + + if err == nil && test.expectedError != nil { + t.Fatalf("%s: the expected error %v, got nil", test.name, test.expectedError) + } + }) + } +} + +func testNodeWithManagementResource(capacity bool) *corev1.Node { + q := resource.NewQuantity(16000, resource.DecimalSI) + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "managed-node", + }, + } + if capacity { + // populate both pools, since the plugin validates capacity and allocatable + node.Status.Capacity = corev1.ResourceList{ + managedCapacityLabel: *q, + } + node.Status.Allocatable = corev1.ResourceList{ + managedCapacityLabel: *q, + } + } + return node +} + +func testClusterInfra(mode configv1.CPUPartitioningMode) *configv1.Infrastructure { + return &configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{ + Name: infraClusterName, + }, + Status: configv1.InfrastructureStatus{ + APIServerURL: "test", + ControlPlaneTopology: configv1.HighlyAvailableTopologyMode, + InfrastructureTopology: configv1.HighlyAvailableTopologyMode, + CPUPartitioning: mode, + }, + } +} + +func fakeUser() user.Info { + return &user.DefaultInfo{ + Name: "testuser", + } +} + +func getMockNode(infra *configv1.Infrastructure) (*managedNodeValidate, error) { + m := &managedNodeValidate{ + Handler: admission.NewHandler(admission.Create), + client: &fake.Clientset{}, + infraConfigLister: fakeInfraConfigLister(infra), + infraConfigListSynced: func() bool { return true }, + } + if err := m.ValidateInitialization(); err != nil { + return nil, err + } + + return m, nil +} + +func fakeInfraConfigLister(infra *configv1.Infrastructure) configv1listers.InfrastructureLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + if infra != nil { + _ = indexer.Add(infra) + } + return configv1listers.NewInfrastructureLister(indexer) +} diff --git a/openshift-kube-apiserver/admission/autoscaling/managednode/initializers.go b/openshift-kube-apiserver/admission/autoscaling/managednode/initializers.go new file mode 100644 index 0000000000000..512a5f8d031c0 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managednode/initializers.go @@ -0,0 +1,28 @@ +package managednode + +import ( + "k8s.io/apiserver/pkg/admission" + + configv1informer "github.com/openshift/client-go/config/informers/externalversions/config/v1" +) + +func NewInitializer(infraInformer configv1informer.InfrastructureInformer) admission.PluginInitializer { + return &localInitializer{infraInformer: infraInformer} +} + +type WantsInfraInformer interface { + SetInfraInformer(informer configv1informer.InfrastructureInformer) + admission.InitializationValidator +} + +type localInitializer struct { + infraInformer configv1informer.InfrastructureInformer +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and 
provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsInfraInformer); ok { + wants.SetInfraInformer(i.infraInformer) + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go new file mode 100644 index 0000000000000..9bf0a1f8a1cfa --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go @@ -0,0 +1,659 @@ +package managementcpusoverride + +import ( + "context" + "encoding/json" + "fmt" + "io" + "reflect" + "strings" + "time" + + configv1 "github.com/openshift/api/config/v1" + configv1informer "github.com/openshift/client-go/config/informers/externalversions/config/v1" + configv1listers "github.com/openshift/client-go/config/listers/config/v1" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/warning" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + coreapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/kubelet/cm" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" +) + +const ( + PluginName = "autoscaling.openshift.io/ManagementCPUsOverride" + // timeToWaitForCacheSync defines how long to wait for the caches to synchronize + timeToWaitForCacheSync = 10 * time.Second + // containerWorkloadResourceSuffix contains the suffix for the container workload resource + containerWorkloadResourceSuffix = "workload.openshift.io/cores" + // podWorkloadTargetAnnotationPrefix contains the prefix for the pod workload target annotation + podWorkloadTargetAnnotationPrefix = "target.workload.openshift.io/" + // podWorkloadAnnotationEffect contains the effect key for the workload annotation value + podWorkloadAnnotationEffect = "effect" + // workloadEffectPreferredDuringScheduling contains the PreferredDuringScheduling effect value + workloadEffectPreferredDuringScheduling = "PreferredDuringScheduling" + // containerResourcesAnnotationPrefix contains the resource annotation prefix that will be used by CRI-O to set cpu shares + containerResourcesAnnotationPrefix = "resources.workload.openshift.io/" + // containerResourcesAnnotationValueKeyCPUShares contains the resource annotation value cpushares key + containerResourcesAnnotationValueKeyCPUShares = "cpushares" + // namespaceAllowedAnnotation contains the namespace allowed annotation key + namespaceAllowedAnnotation = "workload.openshift.io/allowed" + // workloadAdmissionWarning contains the admission warning annotation key + workloadAdmissionWarning = "workload.openshift.io/warning" + // infraClusterName contains the name of the cluster infrastructure resource + infraClusterName = "cluster" + // debugSourceResourceAnnotation contains the debug annotation that refers to the pod resource + debugSourceResourceAnnotation = "debug.openshift.io/source-resource" +) + +var _ = initializer.WantsExternalKubeInformerFactory(&managementCPUsOverride{}) +var _ = initializer.WantsExternalKubeClientSet(&managementCPUsOverride{}) +var _ = admission.MutationInterface(&managementCPUsOverride{}) +var _ = 
admission.ValidationInterface(&managementCPUsOverride{}) +var _ = WantsInfraInformer(&managementCPUsOverride{}) + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, + func(config io.Reader) (admission.Interface, error) { + return &managementCPUsOverride{ + Handler: admission.NewHandler(admission.Create), + }, nil + }) +} + +type resourceAnnotation struct { + // CPUShares contains the value for the cpushares key of the resource annotation + CPUShares uint64 `json:"cpushares,omitempty"` + // CPULimit contains the cpu limit in millicores to be used by the container runtime to calculate + // quota + CPULimit int64 `json:"cpulimit,omitempty"` +} + +// managementCPUsOverride is an admission plugin that replaces pod container CPU requests with a new management resource. +// It applies to all pods that: +// 1. are in an allowed namespace +// 2. and have the workload annotation. +// +// It also sets the new management resource request and limit and sets a resource annotation that CRI-O can +// recognize in order to apply the relevant changes. +// For more information, see - https://github.com/openshift/enhancements/pull/703 +// +// Conditions for CPUs requests deletion: +// 1. The namespace should have the allowed annotation "workload.openshift.io/allowed": "management" +// 2. The pod should have the management annotation: "target.workload.openshift.io/management": "{"effect": "PreferredDuringScheduling"}" +// 3. All nodes under the cluster should have the new management resource - "management.workload.openshift.io/cores" +// 4. The CPU request deletion will not change the pod QoS class +type managementCPUsOverride struct { + *admission.Handler + client kubernetes.Interface + nsLister corev1listers.NamespaceLister + nsListerSynced func() bool + nodeLister corev1listers.NodeLister + nodeListSynced func() bool + infraConfigLister configv1listers.InfrastructureLister + infraConfigListSynced func() bool +} + +func (a *managementCPUsOverride) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + a.nsLister = kubeInformers.Core().V1().Namespaces().Lister() + a.nsListerSynced = kubeInformers.Core().V1().Namespaces().Informer().HasSynced + a.nodeLister = kubeInformers.Core().V1().Nodes().Lister() + a.nodeListSynced = kubeInformers.Core().V1().Nodes().Informer().HasSynced +} + +// SetExternalKubeClientSet implements the WantsExternalKubeClientSet interface. 
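+// The direct client is kept as a fallback: getPodNamespace uses it for a live lookup when the namespace informer cache lags behind.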
+func (a *managementCPUsOverride) SetExternalKubeClientSet(client kubernetes.Interface) { + a.client = client +} + +func (a *managementCPUsOverride) SetInfraInformer(informer configv1informer.InfrastructureInformer) { + a.infraConfigLister = informer.Lister() + a.infraConfigListSynced = informer.Informer().HasSynced +} + +func (a *managementCPUsOverride) ValidateInitialization() error { + if a.client == nil { + return fmt.Errorf("%s plugin needs a kubernetes client", PluginName) + } + if a.nsLister == nil { + return fmt.Errorf("%s did not get a namespace lister", PluginName) + } + if a.nsListerSynced == nil { + return fmt.Errorf("%s plugin needs a namespace lister synced", PluginName) + } + if a.nodeLister == nil { + return fmt.Errorf("%s did not get a node lister", PluginName) + } + if a.nodeListSynced == nil { + return fmt.Errorf("%s plugin needs a node lister synced", PluginName) + } + if a.infraConfigLister == nil { + return fmt.Errorf("%s did not get a config infrastructure lister", PluginName) + } + if a.infraConfigListSynced == nil { + return fmt.Errorf("%s plugin needs a config infrastructure lister synced", PluginName) + } + return nil +} + +func (a *managementCPUsOverride) Admit(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + if attr.GetResource().GroupResource() != coreapi.Resource("pods") || attr.GetSubresource() != "" { + return nil + } + + pod, ok := attr.GetObject().(*coreapi.Pod) + if !ok { + return admission.NewForbidden(attr, fmt.Errorf("unexpected object: %#v", attr.GetObject())) + } + + // do not mutate mirror pods at all + if isStaticPod(pod.Annotations) { + return nil + } + + podAnnotations := map[string]string{} + for k, v := range pod.Annotations { + podAnnotations[k] = v + } + + // strip any resource annotations specified by a user + stripResourcesAnnotations(pod.Annotations) + // strip any workload annotation to prevent the underlying components (CRI-O, kubelet) from applying any changes + // according to the workload annotation + stripWorkloadAnnotations(pod.Annotations) + + workloadType, err := getWorkloadType(podAnnotations) + if err != nil { + invalidError := getPodInvalidWorkloadAnnotationError(podAnnotations, err.Error()) + return errors.NewInvalid(coreapi.Kind("Pod"), pod.Name, field.ErrorList{invalidError}) + } + + // no workload annotation is specified under the pod + if len(workloadType) == 0 { + return nil + } + + if !a.waitForSyncedStore(time.After(timeToWaitForCacheSync)) { + return admission.NewForbidden(attr, fmt.Errorf("%s node or namespace or infra config cache not synchronized", PluginName)) + } + + nodes, err := a.nodeLister.List(labels.Everything()) + if err != nil { + return admission.NewForbidden(attr, err) // can happen due to informer latency + } + + // we still need to have nodes under the cluster to decide if the management resource is enabled or not + if len(nodes) == 0 { + return admission.NewForbidden(attr, fmt.Errorf("%s the cluster does not have any nodes", PluginName)) + } + + clusterInfra, err := a.infraConfigLister.Get(infraClusterName) + if err != nil { + return admission.NewForbidden(attr, err) // can happen due to informer latency + } + + // the infrastructure status is empty, so we cannot decide the cluster type + if reflect.DeepEqual(clusterInfra.Status, configv1.InfrastructureStatus{}) { + return admission.NewForbidden(attr, fmt.Errorf("%s infrastructure resource has empty status", PluginName)) + } + + // the infrastructure status is not empty, but the topology related fields do not have any 
values, which indicates that + // the cluster is in the middle of a roll-back to a version that does not support the topology fields; + // the upgrade to 4.8 is handled by the CR defaulting + if clusterInfra.Status.ControlPlaneTopology == "" && clusterInfra.Status.InfrastructureTopology == "" { + return nil + } + + // Check if we are in CPU Partitioning mode for AllNodes + if !isCPUPartitioning(clusterInfra.Status, nodes, workloadType) { + return nil + } + + // allow annotations on the project to override management pods' CPU requests + ns, err := a.getPodNamespace(attr) + if err != nil { + return err + } + + if _, found := ns.Annotations[namespaceAllowedAnnotation]; !found && len(workloadType) > 0 { + pod.Annotations[workloadAdmissionWarning] = fmt.Sprintf( + "skipping pod CPUs requests modifications because the %s namespace is not annotated with %s to allow workload partitioning", + ns.GetName(), namespaceAllowedAnnotation) + return nil + } + + if !doesNamespaceAllowWorkloadType(ns.Annotations, workloadType) { + return admission.NewForbidden(attr, fmt.Errorf("%s the pod namespace %q does not allow the workload type %s", PluginName, ns.Name, workloadType)) + } + + workloadAnnotation := fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadType) + effect, err := getWorkloadAnnotationEffect(podAnnotations[workloadAnnotation]) + if err != nil { + invalidError := getPodInvalidWorkloadAnnotationError(podAnnotations, fmt.Sprintf("failed to get workload annotation effect: %v", err)) + return errors.NewInvalid(coreapi.Kind("Pod"), pod.Name, field.ErrorList{invalidError}) + } + + // TODO: currently we support only PreferredDuringScheduling effect + if effect != workloadEffectPreferredDuringScheduling { + invalidError := getPodInvalidWorkloadAnnotationError(podAnnotations, fmt.Sprintf("only %q effect is supported", workloadEffectPreferredDuringScheduling)) + return errors.NewInvalid(coreapi.Kind("Pod"), pod.Name, field.ErrorList{invalidError}) + } + + allContainers := append([]coreapi.Container{}, pod.Spec.InitContainers...) + allContainers = append(allContainers, pod.Spec.Containers...) + podQoSClass := getPodQoSClass(allContainers) + + // we do not want to change guaranteed pods resource allocation, because it should be managed by + // relevant managers (CPU and memory) under the kubelet + if podQoSClass == coreapi.PodQOSGuaranteed { + pod.Annotations[workloadAdmissionWarning] = "skip pod CPUs requests modifications because it has guaranteed QoS class" + return nil + } + + // before we update the pod available under admission attributes, we need to verify that deletion of the CPU request + // will not change the pod QoS class; otherwise we skip the pod mutation + // 1. Copy the pod + // 2. Delete CPUs requests for all containers under the pod + // 3. Get modified pod QoS class + // 4. Verify that the pod QoS class before and after the modification stays the same + // 5. Update the pod under admission attributes + podCopy := pod.DeepCopy() + updatePodResources(podCopy, workloadType, podQoSClass) + + allContainersCopy := append([]coreapi.Container{}, podCopy.Spec.InitContainers...) + allContainersCopy = append(allContainersCopy, podCopy.Spec.Containers...) 
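+ // For example (an assumed pod, mirroring the test fixtures): a burstable container with only a CPU request of 250m + // and no memory request would become best-effort once its CPU request is stripped; the comparison below catches + // that case and leaves such pods unmodified.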
+ podQoSClassAfterModification := getPodQoSClass(allContainersCopy) + + if podQoSClass != podQoSClassAfterModification { + pod.Annotations[workloadAdmissionWarning] = fmt.Sprintf("skip pod CPUs requests modifications because it will change the pod QoS class from %s to %s", podQoSClass, podQoSClassAfterModification) + return nil + } + + updatePodResources(pod, workloadType, podQoSClass) + + return nil +} + +func isCPUPartitioning(infraStatus configv1.InfrastructureStatus, nodes []*corev1.Node, workloadType string) bool { + // If status is not for CPU partitioning and we're single node, we also check the nodes to support the upgrade event + // TODO: This should not be needed after 4.13 as all clusters after should have this feature on at install time, or updated by migration in NTO. + if infraStatus.CPUPartitioning != configv1.CPUPartitioningAllNodes && infraStatus.ControlPlaneTopology == configv1.SingleReplicaTopologyMode { + managedResource := fmt.Sprintf("%s.%s", workloadType, containerWorkloadResourceSuffix) + for _, node := range nodes { + // We only expect a single node to exist, so we return on first hit + if _, ok := node.Status.Allocatable[corev1.ResourceName(managedResource)]; ok { + return true + } + } + } + return infraStatus.CPUPartitioning == configv1.CPUPartitioningAllNodes +} + +func (a *managementCPUsOverride) getPodNamespace(attr admission.Attributes) (*corev1.Namespace, error) { + ns, err := a.nsLister.Get(attr.GetNamespace()) + if err == nil { + return ns, nil + } + + if !errors.IsNotFound(err) { + return nil, admission.NewForbidden(attr, err) + } + + // in case of latency in our caches, make a direct call to storage to verify whether it truly exists + ns, err = a.client.CoreV1().Namespaces().Get(context.TODO(), attr.GetNamespace(), metav1.GetOptions{}) + if err == nil { + return ns, nil + } + + if !errors.IsNotFound(err) { + return nil, admission.NewForbidden(attr, err) + } + + return nil, err +} + +func (a *managementCPUsOverride) waitForSyncedStore(timeout <-chan time.Time) bool { + for !a.nsListerSynced() || !a.nodeListSynced() || !a.infraConfigListSynced() { + select { + case <-time.After(100 * time.Millisecond): + case <-timeout: + return a.nsListerSynced() && a.nodeListSynced() && a.infraConfigListSynced() + } + } + + return true +} + +func updatePodResources(pod *coreapi.Pod, workloadType string, class coreapi.PodQOSClass) { + if pod.Annotations == nil { + pod.Annotations = map[string]string{} + } + + // update init containers resources + updateContainersResources(pod.Spec.InitContainers, pod.Annotations, workloadType, class) + + // update app containers resources + updateContainersResources(pod.Spec.Containers, pod.Annotations, workloadType, class) + + // re-add workload annotation + addWorkloadAnnotations(pod.Annotations, workloadType) +} + +func updateContainersResources(containers []coreapi.Container, podAnnotations map[string]string, workloadType string, podQoSClass coreapi.PodQOSClass) { + for i := range containers { + c := &containers[i] + cpusharesAnnotationKey := fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, c.Name) + + // make sure best effort is always 2 shares, which is the minimal number of shares supported + // see - https://github.com/kubernetes/kubernetes/blob/46563b0abebbb00e21db967950a1343e83a0c6a2/pkg/kubelet/cm/qos_container_manager_linux.go#L99 + if podQoSClass == coreapi.PodQOSBestEffort { + podAnnotations[cpusharesAnnotationKey] = fmt.Sprintf(`{"%s": 2}`, containerResourcesAnnotationValueKeyCPUShares) + continue + } + + resourceAnno := 
resourceAnnotation{} + + if c.Resources.Limits != nil { + if value, ok := c.Resources.Limits[coreapi.ResourceCPU]; ok { + resourceAnno.CPULimit = value.MilliValue() + } + } + + if c.Resources.Requests != nil { + if _, ok := c.Resources.Requests[coreapi.ResourceCPU]; !ok { + continue + } + + cpuRequest := c.Resources.Requests[coreapi.ResourceCPU] + cpuRequestInMilli := cpuRequest.MilliValue() + + // Casting to uint64: the Linux build returns uint64, the noop Darwin build returns int64 + resourceAnno.CPUShares = uint64(cm.MilliCPUToShares(cpuRequestInMilli)) + + // This should not error, but if something does go wrong we default to string creation of just CPU Shares + // and add a warning annotation + resourceAnnoString, err := json.Marshal(resourceAnno) + if err != nil { + podAnnotations[workloadAdmissionWarning] = fmt.Sprintf("failed to marshal cpu resources, using fallback: err: %s", err.Error()) + podAnnotations[cpusharesAnnotationKey] = fmt.Sprintf(`{"%s": %d}`, containerResourcesAnnotationValueKeyCPUShares, resourceAnno.CPUShares) + } else { + podAnnotations[cpusharesAnnotationKey] = string(resourceAnnoString) + } + delete(c.Resources.Requests, coreapi.ResourceCPU) + delete(c.Resources.Limits, coreapi.ResourceCPU) + + if c.Resources.Limits == nil { + c.Resources.Limits = coreapi.ResourceList{} + } + + // multiply the CPU request by 1000, to make sure that the resource will pass integer validation + managedResource := fmt.Sprintf("%s.%s", workloadType, containerWorkloadResourceSuffix) + newCPURequest := resource.NewMilliQuantity(cpuRequestInMilli*1000, cpuRequest.Format) + c.Resources.Requests[coreapi.ResourceName(managedResource)] = *newCPURequest + c.Resources.Limits[coreapi.ResourceName(managedResource)] = *newCPURequest + } + } +} + +func IsGuaranteed(containers []coreapi.Container) bool { + for _, c := range containers { + // only memory and CPU resources are relevant to decide pod QoS class + for _, r := range []coreapi.ResourceName{coreapi.ResourceMemory, coreapi.ResourceCPU} { + limit := c.Resources.Limits[r] + request, requestExist := c.Resources.Requests[r] + + if limit.IsZero() { + return false + } + + if !requestExist { + continue + } + + // in some corner cases, when you set the CPU request to 0, k8s will change it to the value + // specified under the limit + if r == coreapi.ResourceCPU && request.IsZero() { + continue + } + + if !limit.Equal(request) { + return false + } + } + } + + return true +} + +func isBestEffort(containers []coreapi.Container) bool { + for _, c := range containers { + // only memory and CPU resources are relevant to decide pod QoS class + for _, r := range []coreapi.ResourceName{coreapi.ResourceMemory, coreapi.ResourceCPU} { + limit := c.Resources.Limits[r] + request := c.Resources.Requests[r] + + if !limit.IsZero() || !request.IsZero() { + return false + } + } + } + + return true +} + +func getPodQoSClass(containers []coreapi.Container) coreapi.PodQOSClass { + if IsGuaranteed(containers) { + return coreapi.PodQOSGuaranteed + } + + if isBestEffort(containers) { + return coreapi.PodQOSBestEffort + } + + return coreapi.PodQOSBurstable +} + +func podHasBothCPULimitAndRequest(containers []coreapi.Container) bool { + for _, c := range containers { + _, cpuRequestExists := c.Resources.Requests[coreapi.ResourceCPU] + _, cpuLimitExists := c.Resources.Limits[coreapi.ResourceCPU] + + if cpuRequestExists && cpuLimitExists { + return true + } + } + + return false +} + +// doesNamespaceAllowWorkloadType will return false when a workload type does not match any present 
ones. +func doesNamespaceAllowWorkloadType(annotations map[string]string, workloadType string) bool { + v, found := annotations[namespaceAllowedAnnotation] + // When a namespace contains no annotation for workloads we infer that to mean all workload types are allowed. + // The mutation hook will strip all workload annotations from pods that contain them in that circumstance. + if !found { + return true + } + + for _, t := range strings.Split(v, ",") { + if workloadType == t { + return true + } + } + + return false +} + +func getWorkloadType(annotations map[string]string) (string, error) { + var workloadAnnotationsKeys []string + for k := range annotations { + if strings.HasPrefix(k, podWorkloadTargetAnnotationPrefix) { + workloadAnnotationsKeys = append(workloadAnnotationsKeys, k) + } + } + + // no workload annotation is specified under the pod + if len(workloadAnnotationsKeys) == 0 { + return "", nil + } + + // more than one workload annotation exists under the pod and we do not support different workload types + // under the same pod + if len(workloadAnnotationsKeys) > 1 { + return "", fmt.Errorf("the pod can not have more than one workload annotations") + } + + workloadType := strings.TrimPrefix(workloadAnnotationsKeys[0], podWorkloadTargetAnnotationPrefix) + if len(workloadType) == 0 { + return "", fmt.Errorf("the workload annotation key should have format %s<type>, where <type> is a non-empty string", podWorkloadTargetAnnotationPrefix) + } + + return workloadType, nil +} + +func getWorkloadAnnotationEffect(workloadAnnotationValue string) (string, error) { + managementAnnotationValue := map[string]string{} + if err := json.Unmarshal([]byte(workloadAnnotationValue), &managementAnnotationValue); err != nil { + return "", fmt.Errorf("failed to parse %q annotation value: %v", workloadAnnotationValue, err) + } + + if len(managementAnnotationValue) > 1 { + return "", fmt.Errorf("the workload annotation value %q has more than one key", managementAnnotationValue) + } + + effect, ok := managementAnnotationValue[podWorkloadAnnotationEffect] + if !ok { + return "", fmt.Errorf("the workload annotation value %q does not have %q key", managementAnnotationValue, podWorkloadAnnotationEffect) + } + return effect, nil +} + +func stripResourcesAnnotations(annotations map[string]string) { + for k := range annotations { + if strings.HasPrefix(k, containerResourcesAnnotationPrefix) { + delete(annotations, k) + } + } +} + +func stripWorkloadAnnotations(annotations map[string]string) { + for k := range annotations { + if strings.HasPrefix(k, podWorkloadTargetAnnotationPrefix) { + delete(annotations, k) + } + } +} + +func addWorkloadAnnotations(annotations map[string]string, workloadType string) { + if annotations == nil { + annotations = map[string]string{} + } + + workloadAnnotation := fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadType) + annotations[workloadAnnotation] = fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling) +} + +func (a *managementCPUsOverride) Validate(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) (err error) { + if attr.GetResource().GroupResource() != coreapi.Resource("pods") || attr.GetSubresource() != "" { + return nil + } + + pod, ok := attr.GetObject().(*coreapi.Pod) + if !ok { + return admission.NewForbidden(attr, fmt.Errorf("unexpected object: %#v", attr.GetObject())) + } + + // do not validate mirror pods at all + if isStaticPod(pod.Annotations) { + return nil + } + + ns, err := a.getPodNamespace(attr) + if err 
!= nil { + return err + } + + var allErrs field.ErrorList + workloadType, err := getWorkloadType(pod.Annotations) + if err != nil { + allErrs = append(allErrs, getPodInvalidWorkloadAnnotationError(pod.Annotations, err.Error())) + } + + workloadResourceAnnotations := resourceAnnotation{} + hasWorkloadAnnotation := false + for k, v := range pod.Annotations { + if !strings.HasPrefix(k, containerResourcesAnnotationPrefix) { + continue + } + hasWorkloadAnnotation = true + + // Custom decoder to print invalid fields for resources + decoder := json.NewDecoder(strings.NewReader(v)) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&workloadResourceAnnotations); err != nil { + allErrs = append(allErrs, getPodInvalidWorkloadAnnotationError(pod.Annotations, err.Error())) + } + } + + containersWorkloadResources := map[string]*coreapi.Container{} + allContainers := append([]coreapi.Container{}, pod.Spec.InitContainers...) + allContainers = append(allContainers, pod.Spec.Containers...) + for i := range allContainers { + c := &allContainers[i] + // we are interested only in requests because only the request affects the scheduler + for r := range c.Resources.Requests { + resourceName := string(r) + if strings.HasSuffix(resourceName, containerWorkloadResourceSuffix) { + containersWorkloadResources[resourceName] = c + } + } + } + + switch { + case len(workloadType) == 0: // the pod does not have workload annotation + if hasWorkloadAnnotation { + allErrs = append(allErrs, getPodInvalidWorkloadAnnotationError(pod.Annotations, "the pod without workload annotation can not have resource annotation")) + } + + for resourceName, c := range containersWorkloadResources { + if isDebugPod(pod.Annotations) { + warning.AddWarning(ctx, "", "You must pass --keep-annotations parameter to the debug command or upgrade the oc tool to the latest version when trying to debug a pod with workload partitioning resources.") + } + + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.containers.resources.requests"), c.Resources.Requests, fmt.Sprintf("the pod without workload annotations can not have containers with workload resources %q", resourceName))) + } + case !doesNamespaceAllowWorkloadType(ns.Annotations, workloadType): // the pod has a workload annotation, but the namespace does not allow the specified workload + allErrs = append(allErrs, getPodInvalidWorkloadAnnotationError(pod.Annotations, fmt.Sprintf("the namespace %q does not allow the workload type %s", ns.Name, workloadType))) + } + + if len(allErrs) == 0 { + return nil + } + + return errors.NewInvalid(coreapi.Kind("Pod"), pod.Name, allErrs) +} + +func getPodInvalidWorkloadAnnotationError(annotations map[string]string, message string) *field.Error { + return field.Invalid(field.NewPath("metadata.Annotations"), annotations, message) +} + +// isStaticPod returns true if the pod is a static pod. 
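+// The kubelet reports static pods to the API server as mirror pods whose config source annotation carries the kubelet's +// file or HTTP source rather than "api", which is what the comparison below relies on.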
+func isStaticPod(annotations map[string]string) bool { + source, ok := annotations[kubetypes.ConfigSourceAnnotationKey] + return ok && source != kubetypes.ApiserverSource +} + +func isDebugPod(annotations map[string]string) bool { + _, ok := annotations[debugSourceResourceAnnotation] + return ok +} diff --git a/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission_test.go b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission_test.go new file mode 100644 index 0000000000000..9564bffe39578 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission_test.go @@ -0,0 +1,708 @@ +package managementcpusoverride + +import ( + "context" + "fmt" + "reflect" + "strings" + "testing" + + configv1 "github.com/openshift/api/config/v1" + configv1listers "github.com/openshift/client-go/config/listers/config/v1" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/kubernetes/fake" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + coreapi "k8s.io/kubernetes/pkg/apis/core" + kapi "k8s.io/kubernetes/pkg/apis/core" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" +) + +const ( + // workloadTypeManagement contains the type for the management workload + workloadTypeManagement = "management" + // managedCapacityLabel contains the name of the new management resource that will be available under the node + managedCapacityLabel = "management.workload.openshift.io/cores" +) + +func getMockManagementCPUsOverride(namespace *corev1.Namespace, nodes []*corev1.Node, infra *configv1.Infrastructure) (*managementCPUsOverride, error) { + m := &managementCPUsOverride{ + Handler: admission.NewHandler(admission.Create), + client: &fake.Clientset{}, + nsLister: fakeNamespaceLister(namespace), + nsListerSynced: func() bool { return true }, + nodeLister: fakeNodeLister(nodes), + nodeListSynced: func() bool { return true }, + infraConfigLister: fakeInfraConfigLister(infra), + infraConfigListSynced: func() bool { return true }, + } + if err := m.ValidateInitialization(); err != nil { + return nil, err + } + + return m, nil +} + +func fakeNamespaceLister(ns *corev1.Namespace) corev1listers.NamespaceLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + _ = indexer.Add(ns) + return corev1listers.NewNamespaceLister(indexer) +} + +func fakeNodeLister(nodes []*corev1.Node) corev1listers.NodeLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + for _, node := range nodes { + _ = indexer.Add(node) + } + return corev1listers.NewNodeLister(indexer) +} + +func fakeInfraConfigLister(infra *configv1.Infrastructure) configv1listers.InfrastructureLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + if infra != nil { + _ = indexer.Add(infra) + } + return configv1listers.NewInfrastructureLister(indexer) +} + +func TestAdmit(t *testing.T) { + tests := []struct { + name string + pod *kapi.Pod + namespace *corev1.Namespace + nodes []*corev1.Node + infra *configv1.Infrastructure + expectedCpuRequest resource.Quantity + expectedAnnotations map[string]string + expectedError error + }{ + { + name: "should return admission error when the pod namespace does not allow the workload type", + pod: 
testManagedPodWithWorkloadAnnotation("500m", "250m", "500Mi", "250Mi", "non-existent"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + expectedError: fmt.Errorf("the pod namespace %q does not allow the workload type non-existent", "managed-namespace"), + }, + { + name: "should ignore pods that do not have managed annotation", + pod: testPod("500m", "250m", "500Mi", "250Mi"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + }, + { + name: "should return admission error when the pod has more than one workload annotation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): "", + fmt.Sprintf("%stest", podWorkloadTargetAnnotationPrefix): "", + }, + ), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + expectedError: fmt.Errorf("the pod can not have more than one workload annotations"), + }, + { + name: "should return admission error when the pod has incorrect workload annotation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + podWorkloadTargetAnnotationPrefix: "", + }, + ), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + expectedError: fmt.Errorf("the workload annotation key should have format %s", podWorkloadTargetAnnotationPrefix), + }, + { + name: "should return admission error when the pod has incorrect workload annotation effect", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): "{", + }, + ), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + expectedError: fmt.Errorf(`failed to get workload annotation effect: failed to parse "{" annotation value: unexpected end of JSON input`), + }, + { + name: "should return admission error when the pod has workload annotation without effect value", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): `{"test": "test"}`, + }, + ), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf(`failed to get workload annotation effect: the workload annotation value map["test":"test"] does not have "effect" key`), + infra: testClusterSNOInfra(), + }, + { + name: "should return admission warning when the pod has workload annotation but the namespace does not", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): `{"test": "test"}`, + }, + ), + expectedCpuRequest: resource.MustParse("250m"), + expectedAnnotations: map[string]string{ + workloadAdmissionWarning: 
"skipping pod CPUs requests modifications because the namespace namespace is not annotated with workload.openshift.io/allowed to allow workload partitioning", + }, + namespace: testNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should delete CPU requests and update workload CPU annotations for the burstable pod with managed annotation", + pod: testManagedPod("", "250m", "500Mi", "250Mi"), + expectedCpuRequest: resource.Quantity{}, + namespace: testManagedNamespace(), + expectedAnnotations: map[string]string{ + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "test"): fmt.Sprintf(`{"%s":256}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "initTest"): fmt.Sprintf(`{"%s":256}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + }, + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should update workload CPU annotations for the best-effort pod with managed annotation", + pod: testManagedPod("", "", "", ""), + expectedCpuRequest: resource.Quantity{}, + namespace: testManagedNamespace(), + expectedAnnotations: map[string]string{ + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "test"): fmt.Sprintf(`{"%s": 2}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "initTest"): fmt.Sprintf(`{"%s": 2}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + }, + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should skip static pod mutation", + pod: testManagedStaticPod("500m", "250m", "500Mi", "250Mi"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + expectedAnnotations: map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource, + }, + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should ignore guaranteed pod", + pod: testManagedPod("500m", "500m", "500Mi", "500Mi"), + expectedCpuRequest: resource.MustParse("500m"), + namespace: testManagedNamespace(), + expectedAnnotations: map[string]string{ + workloadAdmissionWarning: "skip pod CPUs requests modifications because it has guaranteed QoS class", + }, + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should not ignore pod when one of pod containers have both CPU limit and request", + pod: testManagedPod("500m", "250m", "500Mi", ""), + expectedCpuRequest: resource.Quantity{}, + namespace: testManagedNamespace(), + expectedAnnotations: map[string]string{ + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "test"): fmt.Sprintf(`{"%s":256,"cpulimit":500}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "initTest"): fmt.Sprintf(`{"%s":256,"cpulimit":500}`, 
containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + }, + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should ignore pod when removing the CPU request will change the pod QoS class to best-effort", + pod: testManagedPod("", "250m", "", ""), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + expectedAnnotations: map[string]string{ + workloadAdmissionWarning: fmt.Sprintf("skip pod CPUs requests modifications because it will change the pod QoS class from %s to %s", corev1.PodQOSBurstable, corev1.PodQOSBestEffort), + }, + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should not mutate the pod when cpu partitioning is not set to AllNodes", + pod: testManagedPod("500m", "250m", "500Mi", "250Mi"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNode()}, + infra: testClusterInfraWithoutWorkloadPartitioning(), + }, + { + name: "should return admission error when the cluster does not have any nodes", + pod: testManagedPod("500m", "250m", "500Mi", "250Mi"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{}, + infra: testClusterSNOInfra(), + expectedError: fmt.Errorf("the cluster does not have any nodes"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + m, err := getMockManagementCPUsOverride(test.namespace, test.nodes, test.infra) + if err != nil { + t.Fatalf("%s: failed to get mock managementCPUsOverride: %v", test.name, err) + } + + test.pod.Namespace = test.namespace.Name + + attrs := admission.NewAttributesRecord(test.pod, nil, schema.GroupVersionKind{}, test.pod.Namespace, test.pod.Name, kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, fakeUser()) + err = m.Admit(context.TODO(), attrs, nil) + if err != nil { + if test.expectedError == nil { + t.Fatalf("%s: admission controller returned error: %v", test.name, err) + } + + if !strings.Contains(err.Error(), test.expectedError.Error()) { + t.Fatalf("%s: the expected error %v, got %v", test.name, test.expectedError, err) + } + } + + if err == nil && test.expectedError != nil { + t.Fatalf("%s: the expected error %v, got nil", test.name, test.expectedError) + } + + if test.expectedAnnotations != nil && !reflect.DeepEqual(test.expectedAnnotations, test.pod.Annotations) { + t.Fatalf("%s: the pod annotations do not match; %v should be %v", test.name, test.pod.Annotations, test.expectedAnnotations) + } + + resources := test.pod.Spec.InitContainers[0].Resources // only test one container + if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 { + t.Fatalf("%s: cpu requests do not match; %v should be %v", test.name, actual, test.expectedCpuRequest) + } + + resources = test.pod.Spec.Containers[0].Resources // only test one container + if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 { + t.Fatalf("%s: cpu requests do not match; %v should be %v", test.name, actual, test.expectedCpuRequest) + } + }) + } +} + +func TestGetPodQoSClass(t *testing.T) { + tests := []struct { + name string + pod *kapi.Pod + expectedQoSClass coreapi.PodQOSClass + }{ + { + name: 
"should recognize best-effort pod", + pod: testManagedPod("", "", "", ""), + expectedQoSClass: coreapi.PodQOSBestEffort, + }, + { + name: "should recognize guaranteed pod", + pod: testManagedPod("100m", "100m", "100Mi", "100Mi"), + expectedQoSClass: coreapi.PodQOSGuaranteed, + }, + { + name: "should recognize guaranteed pod when CPU request equals to 0", + pod: testManagedPod("100m", "0", "100Mi", "100Mi"), + expectedQoSClass: coreapi.PodQOSGuaranteed, + }, + { + name: "should recognize burstable pod with only CPU limit", + pod: testManagedPod("100m", "", "", ""), + expectedQoSClass: coreapi.PodQOSBurstable, + }, + { + name: "should recognize burstable pod with only CPU request", + pod: testManagedPod("", "100m", "", ""), + expectedQoSClass: coreapi.PodQOSBurstable, + }, + { + name: "should recognize burstable pod with only memory limit", + pod: testManagedPod("", "", "100Mi", ""), + expectedQoSClass: coreapi.PodQOSBurstable, + }, + { + name: "should recognize burstable pod with only memory request", + pod: testManagedPod("", "", "", "100Mi"), + expectedQoSClass: coreapi.PodQOSBurstable, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + allContainers := append([]coreapi.Container{}, test.pod.Spec.InitContainers...) + allContainers = append(allContainers, test.pod.Spec.Containers...) + qosClass := getPodQoSClass(allContainers) + if qosClass != test.expectedQoSClass { + t.Fatalf("%s: pod has QoS class %s; should be %s", test.name, qosClass, test.expectedQoSClass) + } + }) + } +} + +func TestValidate(t *testing.T) { + tests := []struct { + name string + pod *kapi.Pod + namespace *corev1.Namespace + nodes []*corev1.Node + expectedError error + }{ + { + name: "should return invalid error when the pod has more than one workload annotation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): "", + fmt.Sprintf("%stest", podWorkloadTargetAnnotationPrefix): "", + }, + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf("the pod can not have more than one workload annotations"), + }, + { + name: "should return invalid error when the pod has incorrect workload annotation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + podWorkloadTargetAnnotationPrefix: "", + }, + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf("the workload annotation key should have format %s", podWorkloadTargetAnnotationPrefix), + }, + { + name: "should return invalid error when the pod has cpuset resource annotation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "test"): `{"cpuset": 1}`, + }, + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf("json: unknown field \"cpuset\""), + }, + { + name: "should return invalid error when the pod does not have workload annotation, but has resource annotation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ 
+ fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "test"): fmt.Sprintf(`{"%s": 2}`, containerResourcesAnnotationValueKeyCPUShares), + }, + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf("the pod without workload annotation can not have resource annotation"), + }, + { + name: "should return invalid error when the pod does not have workload annotation, but the container has management resource", + pod: testPodWithManagedResource( + "500m", + "250m", + "500Mi", + "250Mi", + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf("the pod without workload annotations can not have containers with workload resources %q", "management.workload.openshift.io/cores"), + }, + { + name: "should return invalid error when the pod has workload annotation, but the pod namespace does not have allowed workload type", + pod: testManagedPodWithWorkloadAnnotation( + "500m", + "250m", + "500Mi", + "250Mi", + "non-existent", + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf("the namespace %q does not allow the workload type %s", "managed-namespace", "non-existent"), + }, + { + name: "should not return any errors when the pod has workload annotation, but the pod namespace has no annotations", + pod: testManagedPod( + "500m", + "250m", + "500Mi", + "250Mi", + ), + namespace: testNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + }, + { + name: "should not return any errors when the pod and namespace valid", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "test"): fmt.Sprintf(`{"%s": 256}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "initTest"): fmt.Sprintf(`{"%s": 256}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + }, + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + }, + { + name: "should skip static pod validation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): "", + fmt.Sprintf("%stest", podWorkloadTargetAnnotationPrefix): "", + kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource, + }, + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + m, err := getMockManagementCPUsOverride(test.namespace, test.nodes, nil) + if err != nil { + t.Fatalf("%s: failed to get mock managementCPUsOverride: %v", test.name, err) + } + test.pod.Namespace = test.namespace.Name + + attrs := admission.NewAttributesRecord(test.pod, nil, schema.GroupVersionKind{}, test.pod.Namespace, test.pod.Name, kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, fakeUser()) + err = m.Validate(context.TODO(), attrs, nil) + if err != nil { + if test.expectedError == nil { + t.Fatalf("%s: admission controller returned error: %v", test.name, err) + } + + if !strings.Contains(err.Error(), 
test.expectedError.Error()) { + t.Fatalf("%s: the expected error %v, got %v", test.name, test.expectedError, err) + } + } + + if err == nil && test.expectedError != nil { + t.Fatalf("%s: the expected error %v, got nil", test.name, test.expectedError) + } + }) + } +} + +func testPodWithManagedResource(cpuLimit, cpuRequest, memoryLimit, memoryRequest string) *kapi.Pod { + pod := testPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest) + + managedResourceName := fmt.Sprintf("%s.%s", workloadTypeManagement, containerWorkloadResourceSuffix) + + managedResourceQuantity := resource.MustParse("26") + pod.Spec.Containers[0].Resources.Requests[kapi.ResourceName(managedResourceName)] = managedResourceQuantity + return pod +} + +func testManagedPodWithAnnotations(cpuLimit, cpuRequest, memoryLimit, memoryRequest string, annotations map[string]string) *kapi.Pod { + pod := testManagedPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest) + pod.Annotations = annotations + return pod +} + +func testManagedStaticPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest string) *kapi.Pod { + pod := testManagedPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest) + pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource + return pod +} + +func testManagedPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest string) *kapi.Pod { + return testManagedPodWithWorkloadAnnotation(cpuLimit, cpuRequest, memoryLimit, memoryRequest, workloadTypeManagement) +} + +func testManagedPodWithWorkloadAnnotation(cpuLimit, cpuRequest, memoryLimit, memoryRequest string, workloadType string) *kapi.Pod { + pod := testPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest) + managementWorkloadAnnotation := fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadType) + pod.Annotations = map[string]string{ + managementWorkloadAnnotation: fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + } + + return pod +} + +func testPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest string) *kapi.Pod { + pod := &kapi.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: kapi.PodSpec{ + InitContainers: []kapi.Container{ + { + Name: "initTest", + }, + }, + Containers: []kapi.Container{ + { + Name: "test", + }, + }, + }, + } + + var limits kapi.ResourceList + // we need this kind of statement to verify assignment to entry in nil map + if cpuLimit != "" || memoryLimit != "" { + limits = kapi.ResourceList{} + if cpuLimit != "" { + limits[kapi.ResourceCPU] = resource.MustParse(cpuLimit) + } + + if memoryLimit != "" { + limits[kapi.ResourceMemory] = resource.MustParse(memoryLimit) + } + + pod.Spec.InitContainers[0].Resources.Limits = limits.DeepCopy() + pod.Spec.Containers[0].Resources.Limits = limits.DeepCopy() + } + + var requests kapi.ResourceList + // we need this kind of statement to verify assignment to entry in nil map + if cpuRequest != "" || memoryRequest != "" { + requests = kapi.ResourceList{} + if cpuRequest != "" { + requests[kapi.ResourceCPU] = resource.MustParse(cpuRequest) + } + if memoryRequest != "" { + requests[kapi.ResourceMemory] = resource.MustParse(memoryRequest) + } + + pod.Spec.InitContainers[0].Resources.Requests = requests.DeepCopy() + pod.Spec.Containers[0].Resources.Requests = requests.DeepCopy() + } + + return pod +} + +func fakeUser() user.Info { + return &user.DefaultInfo{ + Name: "testuser", + } +} + +func testNamespace() *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", 
+ }, + } +} + +func testManagedNamespace() *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "managed-namespace", + Annotations: map[string]string{ + namespaceAllowedAnnotation: fmt.Sprintf("%s,test", workloadTypeManagement), + }, + }, + } +} + +func testNode() *corev1.Node { + return &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node", + }, + } +} + +func testNodeWithManagementResource() *corev1.Node { + q := resource.NewQuantity(16000, resource.DecimalSI) + return &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "managed-node", + }, + Status: corev1.NodeStatus{ + Allocatable: corev1.ResourceList{ + managedCapacityLabel: *q, + }, + }, + } +} + +func testClusterInfraWithoutAnyStatusFields() *configv1.Infrastructure { + return &configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{ + Name: infraClusterName, + }, + } +} + +func testClusterSNOInfra() *configv1.Infrastructure { + return &configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{ + Name: infraClusterName, + }, + Status: configv1.InfrastructureStatus{ + APIServerURL: "test", + ControlPlaneTopology: configv1.SingleReplicaTopologyMode, + InfrastructureTopology: configv1.SingleReplicaTopologyMode, + CPUPartitioning: configv1.CPUPartitioningAllNodes, + }, + } +} + +func testClusterInfraWithoutWorkloadPartitioning() *configv1.Infrastructure { + infra := testClusterSNOInfra() + infra.Status.CPUPartitioning = configv1.CPUPartitioningNone + return infra +} diff --git a/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/doc.go b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/doc.go new file mode 100644 index 0000000000000..bcd9c74ec4723 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/doc.go @@ -0,0 +1,16 @@ +package managementcpusoverride + +// The ManagementCPUsOverride admission plugin replaces pod container CPU requests with a new management resource. +// It applies to all pods that: +// 1. are in an allowed namespace +// 2. and have the workload annotation. +// +// It also sets the new management resource request and limit and sets a resource annotation that CRI-O can +// recognize in order to apply the relevant changes. +// For more information, see - https://github.com/openshift/enhancements/pull/703 +// +// Conditions for CPUs requests deletion: +// 1. The namespace should have the allowed annotation "workload.openshift.io/allowed": "management" +// 2. The pod should have the management annotation: "target.workload.openshift.io/management": "{"effect": "PreferredDuringScheduling"}" +// 3. All nodes under the cluster should have the new management resource - "management.workload.openshift.io/cores" +// 4. 
The CPU request deletion will not change the pod QoS class diff --git a/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/initializers.go b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/initializers.go new file mode 100644 index 0000000000000..02fcd69ebbcb4 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/initializers.go @@ -0,0 +1,28 @@ +package managementcpusoverride + +import ( + "k8s.io/apiserver/pkg/admission" + + configv1informer "github.com/openshift/client-go/config/informers/externalversions/config/v1" +) + +func NewInitializer(infraInformer configv1informer.InfrastructureInformer) admission.PluginInitializer { + return &localInitializer{infraInformer: infraInformer} +} + +type WantsInfraInformer interface { + SetInfraInformer(informer configv1informer.InfrastructureInformer) + admission.InitializationValidator +} + +type localInitializer struct { + infraInformer configv1informer.InfrastructureInformer +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsInfraInformer); ok { + wants.SetInfraInformer(i.infraInformer) + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/mixedcpus/admission.go b/openshift-kube-apiserver/admission/autoscaling/mixedcpus/admission.go new file mode 100644 index 0000000000000..61a7aa614ad5e --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/mixedcpus/admission.go @@ -0,0 +1,152 @@ +package mixedcpus + +import ( + "context" + "fmt" + "io" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride" + coreapi "k8s.io/kubernetes/pkg/apis/core" +) + +const ( + PluginName = "autoscaling.openshift.io/MixedCPUs" + annotationEnable = "enable" + // containerResourceRequestName is the name of the resource that should be specified under the container's request in the pod spec + containerResourceRequestName = "workload.openshift.io/enable-shared-cpus" + // runtimeAnnotationPrefix is the prefix for the annotation that is expected by the runtime + runtimeAnnotationPrefix = "cpu-shared.crio.io" + // namespaceAllowedAnnotation contains the namespace allowed annotation key + namespaceAllowedAnnotation = "workload.mixedcpus.openshift.io/allowed" +) + +var _ = initializer.WantsExternalKubeClientSet(&mixedCPUsMutation{}) +var _ = initializer.WantsExternalKubeInformerFactory(&mixedCPUsMutation{}) +var _ = admission.MutationInterface(&mixedCPUsMutation{}) + +type mixedCPUsMutation struct { + *admission.Handler + client kubernetes.Interface + podLister corev1listers.PodLister + podListerSynced func() bool + nsLister corev1listers.NamespaceLister + nsListerSynced func() bool +} + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, + func(config io.Reader) (admission.Interface, error) { + return &mixedCPUsMutation{ + Handler: admission.NewHandler(admission.Create), + }, nil + }) +} + +// SetExternalKubeClientSet implements the WantsExternalKubeClientSet interface. 
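+// The client is used only as a fallback when the namespace informer cache has not yet caught up; see getPodNs below.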
+func (s *mixedCPUsMutation) SetExternalKubeClientSet(client kubernetes.Interface) { + s.client = client +} + +func (s *mixedCPUsMutation) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + s.podLister = kubeInformers.Core().V1().Pods().Lister() + s.podListerSynced = kubeInformers.Core().V1().Pods().Informer().HasSynced + s.nsLister = kubeInformers.Core().V1().Namespaces().Lister() + s.nsListerSynced = kubeInformers.Core().V1().Namespaces().Informer().HasSynced +} + +func (s *mixedCPUsMutation) ValidateInitialization() error { + if s.client == nil { + return fmt.Errorf("%s plugin needs a kubernetes client", PluginName) + } + if s.podLister == nil { + return fmt.Errorf("%s did not get a pod lister", PluginName) + } + if s.podListerSynced == nil { + return fmt.Errorf("%s plugin needs a pod lister synced", PluginName) + } + if s.nsLister == nil { + return fmt.Errorf("%s did not get a namespace lister", PluginName) + } + if s.nsListerSynced == nil { + return fmt.Errorf("%s plugin needs a namespace lister synced", PluginName) + } + return nil +} + +func (s *mixedCPUsMutation) Admit(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + if attr.GetResource().GroupResource() != coreapi.Resource("pods") || attr.GetSubresource() != "" { + return nil + } + + pod, ok := attr.GetObject().(*coreapi.Pod) + if !ok { + return admission.NewForbidden(attr, fmt.Errorf("%s unexpected object: %#v", PluginName, attr.GetObject())) + } + + for i := 0; i < len(pod.Spec.Containers); i++ { + cnt := &pod.Spec.Containers[i] + requested, v := isContainerRequestForSharedCPUs(cnt) + if !requested { + continue + } + ns, err := s.getPodNs(ctx, pod.Namespace) + if err != nil { + return fmt.Errorf("%s %w", PluginName, err) + } + _, found := ns.Annotations[namespaceAllowedAnnotation] + if !found { + return admission.NewForbidden(attr, fmt.Errorf("%s pod %s namespace %s is not allowed for %s resource request", PluginName, pod.Name, pod.Namespace, containerResourceRequestName)) + } + if !managementcpusoverride.IsGuaranteed(pod.Spec.Containers) { + return admission.NewForbidden(attr, fmt.Errorf("%s %s/%s requests the %q resource but the pod is not in the Guaranteed QoS class", PluginName, pod.Name, cnt.Name, containerResourceRequestName)) + } + if v.Value() > 1 { + return admission.NewForbidden(attr, fmt.Errorf("%s %s/%s requesting more than a single %q resource is forbidden, please set the request to 1 or remove it", PluginName, pod.Name, cnt.Name, containerResourceRequestName)) + } + addRuntimeAnnotation(pod, cnt.Name) + } + return nil +} + +func (s *mixedCPUsMutation) getPodNs(ctx context.Context, nsName string) (*v1.Namespace, error) { + ns, err := s.nsLister.Get(nsName) + if err != nil { + if !errors.IsNotFound(err) { + return nil, fmt.Errorf("%s failed to retrieve namespace %q from lister; %w", PluginName, nsName, err) + } + // cache didn't update fast enough + ns, err = s.client.CoreV1().Namespaces().Get(ctx, nsName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("%s failed to retrieve namespace %q from api server; %w", PluginName, nsName, err) + } + } + return ns, nil +} + +func isContainerRequestForSharedCPUs(container *coreapi.Container) (bool, resource.Quantity) { + for rName, quan := range container.Resources.Requests { + if rName == containerResourceRequestName { + return true, quan + } + } + return false, resource.Quantity{} +} + +func addRuntimeAnnotation(pod *coreapi.Pod, cntName string) { + if pod.Annotations == nil { + pod.Annotations = 
map[string]string{} + } + pod.Annotations[getRuntimeAnnotationName(cntName)] = annotationEnable +} + +func getRuntimeAnnotationName(cntName string) string { + return fmt.Sprintf("%s/%s", runtimeAnnotationPrefix, cntName) +} diff --git a/openshift-kube-apiserver/admission/autoscaling/mixedcpus/admission_test.go b/openshift-kube-apiserver/admission/autoscaling/mixedcpus/admission_test.go new file mode 100644 index 0000000000000..89d6dab671030 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/mixedcpus/admission_test.go @@ -0,0 +1,243 @@ +package mixedcpus + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/kubernetes/fake" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + coreapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/test/e2e/framework/pod" +) + +func TestAdmit(t *testing.T) { + testCases := []struct { + name string + pod *coreapi.Pod + ns *corev1.Namespace + expectedPodStatus *errors.StatusError + // container names that should have the runtime annotation + expectedContainersWithAnnotations []string + }{ + { + name: "one container, requests single resources", + pod: makePod("test1", withNs("foo"), + withGuaranteedContainer("cnt1", + map[coreapi.ResourceName]resource.Quantity{ + coreapi.ResourceCPU: resource.MustParse("1"), + coreapi.ResourceMemory: resource.MustParse("100Mi"), + containerResourceRequestName: resource.MustParse("1"), + }, + )), + ns: makeNs("foo", map[string]string{namespaceAllowedAnnotation: ""}), + expectedContainersWithAnnotations: []string{"cnt1"}, + expectedPodStatus: nil, + }, + { + name: "two containers, only one of them requests single resource", + pod: makePod("test1", withNs("foo"), + withGuaranteedContainer("cnt1", + map[coreapi.ResourceName]resource.Quantity{ + coreapi.ResourceCPU: resource.MustParse("1"), + coreapi.ResourceMemory: resource.MustParse("100Mi"), + }, + ), + withGuaranteedContainer("cnt2", + map[coreapi.ResourceName]resource.Quantity{ + coreapi.ResourceCPU: resource.MustParse("1"), + coreapi.ResourceMemory: resource.MustParse("100Mi"), + containerResourceRequestName: resource.MustParse("1"), + }, + )), + ns: makeNs("foo", map[string]string{namespaceAllowedAnnotation: ""}), + expectedContainersWithAnnotations: []string{"cnt2"}, + expectedPodStatus: nil, + }, + { + name: "two containers, one of them requests more than single resource", + pod: makePod("test1", withNs("bar"), + withGuaranteedContainer("cnt1", + map[coreapi.ResourceName]resource.Quantity{ + coreapi.ResourceCPU: resource.MustParse("1"), + coreapi.ResourceMemory: resource.MustParse("100Mi"), + containerResourceRequestName: resource.MustParse("1"), + }, + ), + withGuaranteedContainer("cnt2", + map[coreapi.ResourceName]resource.Quantity{ + coreapi.ResourceCPU: resource.MustParse("1"), + coreapi.ResourceMemory: resource.MustParse("100Mi"), + containerResourceRequestName: resource.MustParse("2"), + }, + )), + ns: makeNs("bar", map[string]string{namespaceAllowedAnnotation: ""}), + expectedContainersWithAnnotations: []string{}, + expectedPodStatus: errors.NewForbidden(schema.GroupResource{}, "", nil), + }, + { + name: "one container, pod is not Guaranteed QoS class", + pod: makePod("test1", withNs("bar"), + withContainer("cnt1", + 
map[coreapi.ResourceName]resource.Quantity{ + coreapi.ResourceCPU: resource.MustParse("1"), + coreapi.ResourceMemory: resource.MustParse("100Mi"), + containerResourceRequestName: resource.MustParse("1"), + }, + ), + ), + ns: makeNs("bar", map[string]string{namespaceAllowedAnnotation: ""}), + expectedContainersWithAnnotations: []string{}, + expectedPodStatus: errors.NewForbidden(schema.GroupResource{}, "", nil), + }, + { + name: "one container, pod is not in allowed namespace", + pod: makePod("test1", + withGuaranteedContainer("cnt1", + map[coreapi.ResourceName]resource.Quantity{ + coreapi.ResourceCPU: resource.MustParse("1"), + coreapi.ResourceMemory: resource.MustParse("100Mi"), + containerResourceRequestName: resource.MustParse("1"), + }, + ), + ), + ns: makeNs("bar", map[string]string{namespaceAllowedAnnotation: ""}), + expectedContainersWithAnnotations: []string{}, + expectedPodStatus: errors.NewForbidden(schema.GroupResource{}, "", nil), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + testPod := tc.pod + mutation, err := getMockMixedCPUsMutation(testPod, tc.ns) + if err != nil { + t.Fatalf("%v", err) + } + attrs := admission.NewAttributesRecord(testPod, + nil, + schema.GroupVersionKind{}, + testPod.Namespace, + testPod.Name, + coreapi.Resource("pods").WithVersion("version"), + "", + admission.Create, + nil, + false, + fakeUser()) + + err = mutation.Admit(context.TODO(), attrs, nil) + if err != nil && tc.expectedPodStatus == nil { + t.Errorf("%s: unexpected error %v", tc.name, err) + } + + if err != nil { + if !errors.IsForbidden(tc.expectedPodStatus) { + t.Errorf("%s: forbidden error was expected. got %v instead", tc.name, err) + } + } + + testPod, _ = attrs.GetObject().(*coreapi.Pod) + for _, cntName := range tc.expectedContainersWithAnnotations { + if v, ok := testPod.Annotations[getRuntimeAnnotationName(cntName)]; !ok || v != annotationEnable { + t.Errorf("%s: container %s is missing runtime annotation", tc.name, cntName) + } + } + }) + } +} + +func fakeUser() user.Info { + return &user.DefaultInfo{ + Name: "testuser", + } +} + +func makeNs(name string, annotations map[string]string) *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: annotations, + }, + } +} + +func makePod(name string, opts ...func(pod *coreapi.Pod)) *coreapi.Pod { + p := &coreapi.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + for _, opt := range opts { + opt(p) + } + return p +} + +func withContainer(name string, requests coreapi.ResourceList) func(p *coreapi.Pod) { + return func(p *coreapi.Pod) { + cnt := coreapi.Container{ + Name: name, + Image: pod.GetDefaultTestImage(), + Resources: coreapi.ResourceRequirements{ + Requests: requests, + }, + } + p.Spec.Containers = append(p.Spec.Containers, cnt) + } +} + +func withGuaranteedContainer(name string, requests coreapi.ResourceList) func(p *coreapi.Pod) { + return func(p *coreapi.Pod) { + withContainer(name, requests)(p) + for i := 0; i < len(p.Spec.Containers); i++ { + cnt := &p.Spec.Containers[i] + if cnt.Name == name { + cnt.Resources.Limits = cnt.Resources.Requests + } + } + } +} + +func withNs(name string) func(p *coreapi.Pod) { + return func(p *coreapi.Pod) { + p.Namespace = name + } +} + +func fakePodLister(pod *coreapi.Pod) corev1listers.PodLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + if pod != nil { + _ = indexer.Add(pod) + } + return corev1listers.NewPodLister(indexer) +} + +func fakeNsLister(ns 
*corev1.Namespace) corev1listers.NamespaceLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + _ = indexer.Add(ns) + return corev1listers.NewNamespaceLister(indexer) +} + +func getMockMixedCPUsMutation(pod *coreapi.Pod, ns *corev1.Namespace) (*mixedCPUsMutation, error) { + m := &mixedCPUsMutation{ + Handler: admission.NewHandler(admission.Create), + client: &fake.Clientset{}, + podListerSynced: func() bool { return true }, + podLister: fakePodLister(pod), + nsListerSynced: func() bool { return true }, + nsLister: fakeNsLister(ns), + } + if err := m.ValidateInitialization(); err != nil { + return nil, err + } + + return m, nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/mixedcpus/doc.go b/openshift-kube-apiserver/admission/autoscaling/mixedcpus/doc.go new file mode 100644 index 0000000000000..bac1a688e1e4b --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/mixedcpus/doc.go @@ -0,0 +1,10 @@ +package mixedcpus + +// The admission plugin provides the following functionality: +// 1. If a user specifies more than a single `workload.openshift.io/enable-shared-cpus` resource, +// it rejects the pod request with an error explaining to the user how to fix the pod spec. +// 2. It rejects a non-guaranteed pod that asks for the `workload.openshift.io/enable-shared-cpus` resource. +// 3. It adds an annotation that tells the runtime that shared cpus were requested. +// For every container that requests shared cpus, it adds an annotation with the following scheme: +// `cpu-shared.crio.io/<container name>` +// 4. It validates that the pod is deployed in a namespace that has the `workload.mixedcpus.openshift.io/allowed` annotation. diff --git a/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission.go b/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission.go new file mode 100644 index 0000000000000..9326205f9b333 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission.go @@ -0,0 +1,148 @@ +package runonceduration + +import ( + "context" + "errors" + "fmt" + "io" + "strconv" + + "k8s.io/klog/v2" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + kapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/utils/integer" + + "github.com/openshift/library-go/pkg/config/helpers" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation" +) + +func Register(plugins *admission.Plugins) { + plugins.Register("autoscaling.openshift.io/RunOnceDuration", + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", "autoscaling.openshift.io/RunOnceDuration") + return nil, nil + } + return NewRunOnceDuration(pluginConfig), nil + }) +} + +func readConfig(reader io.Reader) (*runonceduration.RunOnceDurationConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, runonceduration.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*runonceduration.RunOnceDurationConfig) + if !ok { + return nil, 
fmt.Errorf("unexpected config object %#v", obj) + } + errs := validation.ValidateRunOnceDurationConfig(config) + if len(errs) > 0 { + return nil, errs.ToAggregate() + } + return config, nil +} + +// NewRunOnceDuration creates a new RunOnceDuration admission plugin +func NewRunOnceDuration(config *runonceduration.RunOnceDurationConfig) admission.Interface { + return &runOnceDuration{ + Handler: admission.NewHandler(admission.Create), + config: config, + } +} + +type runOnceDuration struct { + *admission.Handler + config *runonceduration.RunOnceDurationConfig + nsLister corev1listers.NamespaceLister +} + +var _ = initializer.WantsExternalKubeInformerFactory(&runOnceDuration{}) + +func (a *runOnceDuration) Admit(ctx context.Context, attributes admission.Attributes, _ admission.ObjectInterfaces) error { + switch { + case a.config == nil, + attributes.GetResource().GroupResource() != kapi.Resource("pods"), + len(attributes.GetSubresource()) > 0: + return nil + } + pod, ok := attributes.GetObject().(*kapi.Pod) + if !ok { + return admission.NewForbidden(attributes, fmt.Errorf("unexpected object: %#v", attributes.GetObject())) + } + + // Only update pods with a restart policy of Never or OnFailure + switch pod.Spec.RestartPolicy { + case kapi.RestartPolicyNever, + kapi.RestartPolicyOnFailure: + // continue + default: + return nil + } + + appliedProjectLimit, err := a.applyProjectAnnotationLimit(attributes.GetNamespace(), pod) + if err != nil { + return admission.NewForbidden(attributes, err) + } + + if !appliedProjectLimit && a.config.ActiveDeadlineSecondsOverride != nil { + pod.Spec.ActiveDeadlineSeconds = int64MinP(a.config.ActiveDeadlineSecondsOverride, pod.Spec.ActiveDeadlineSeconds) + } + return nil +} + +func (a *runOnceDuration) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + a.nsLister = kubeInformers.Core().V1().Namespaces().Lister() +} + +func (a *runOnceDuration) ValidateInitialization() error { + if a.nsLister == nil { + return errors.New("autoscaling.openshift.io/RunOnceDuration plugin requires a namespace listers") + } + return nil +} + +func (a *runOnceDuration) applyProjectAnnotationLimit(namespace string, pod *kapi.Pod) (bool, error) { + ns, err := a.nsLister.Get(namespace) + if err != nil { + return false, fmt.Errorf("error looking up pod namespace: %v", err) + } + if ns.Annotations == nil { + return false, nil + } + limit, hasLimit := ns.Annotations[runonceduration.ActiveDeadlineSecondsLimitAnnotation] + if !hasLimit { + return false, nil + } + limitInt64, err := strconv.ParseInt(limit, 10, 64) + if err != nil { + return false, fmt.Errorf("cannot parse the ActiveDeadlineSeconds limit (%s) for project %s: %v", limit, ns.Name, err) + } + pod.Spec.ActiveDeadlineSeconds = int64MinP(&limitInt64, pod.Spec.ActiveDeadlineSeconds) + return true, nil +} + +func int64MinP(a, b *int64) *int64 { + switch { + case a == nil: + return b + case b == nil: + return a + default: + c := integer.Int64Min(*a, *b) + return &c + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission_test.go b/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission_test.go new file mode 100644 index 0000000000000..856d32801bfbb --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission_test.go @@ -0,0 +1,215 @@ +package runonceduration + +import ( + "bytes" + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apiserver/pkg/admission" + corev1listers "k8s.io/client-go/listers/core/v1" + 
"k8s.io/client-go/tools/cache" + kapi "k8s.io/kubernetes/pkg/apis/core" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func fakeNamespaceLister(projectAnnotations map[string]string) corev1listers.NamespaceLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + ns := &corev1.Namespace{} + ns.Name = "default" + ns.Annotations = projectAnnotations + indexer.Add(ns) + return corev1listers.NewNamespaceLister(indexer) +} + +func testConfig(n *int64) *runonceduration.RunOnceDurationConfig { + return &runonceduration.RunOnceDurationConfig{ + ActiveDeadlineSecondsOverride: n, + } +} + +func testRunOncePod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.RestartPolicy = kapi.RestartPolicyNever + return pod +} + +func testRestartOnFailurePod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.RestartPolicy = kapi.RestartPolicyOnFailure + return pod +} + +func testRunOncePodWithDuration(n int64) *kapi.Pod { + pod := testRunOncePod() + pod.Spec.ActiveDeadlineSeconds = &n + return pod +} + +func testRestartAlwaysPod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.RestartPolicy = kapi.RestartPolicyAlways + return pod +} + +func int64p(n int64) *int64 { + return &n +} + +func TestRunOnceDurationAdmit(t *testing.T) { + tests := []struct { + name string + config *runonceduration.RunOnceDurationConfig + pod *kapi.Pod + projectAnnotations map[string]string + expectedActiveDeadlineSeconds *int64 + }{ + { + name: "expect globally configured duration to be set", + config: testConfig(int64p(10)), + pod: testRunOncePod(), + expectedActiveDeadlineSeconds: int64p(10), + }, + { + name: "empty config, no duration to be set", + config: testConfig(nil), + pod: testRunOncePod(), + expectedActiveDeadlineSeconds: nil, + }, + { + name: "expect configured duration to not limit lower existing duration", + config: testConfig(int64p(10)), + pod: testRunOncePodWithDuration(5), + expectedActiveDeadlineSeconds: int64p(5), + }, + { + name: "expect empty config to not limit existing duration", + config: testConfig(nil), + pod: testRunOncePodWithDuration(5), + expectedActiveDeadlineSeconds: int64p(5), + }, + { + name: "expect project limit to be used with nil global value", + config: testConfig(nil), + pod: testRunOncePodWithDuration(2000), + projectAnnotations: map[string]string{ + runonceduration.ActiveDeadlineSecondsLimitAnnotation: "1000", + }, + expectedActiveDeadlineSeconds: int64p(1000), + }, + { + name: "expect project limit to not limit a smaller set value", + config: testConfig(nil), + pod: testRunOncePodWithDuration(10), + projectAnnotations: map[string]string{ + runonceduration.ActiveDeadlineSecondsLimitAnnotation: "1000", + }, + expectedActiveDeadlineSeconds: int64p(10), + }, + { + name: "expect project limit to have priority over global config value", + config: testConfig(int64p(10)), + pod: testRunOncePodWithDuration(2000), + projectAnnotations: map[string]string{ + runonceduration.ActiveDeadlineSecondsLimitAnnotation: "1000", + }, + expectedActiveDeadlineSeconds: int64p(1000), + }, + { + name: "make no change to a pod that is not a run-once pod", + config: testConfig(int64p(10)), + pod: testRestartAlwaysPod(), + expectedActiveDeadlineSeconds: nil, + }, + { + name: "update a pod that has a RestartOnFailure policy", + config: testConfig(int64p(10)), + pod: testRestartOnFailurePod(), + expectedActiveDeadlineSeconds: int64p(10), + }, + } + + for _, tc := range tests { + admissionPlugin := NewRunOnceDuration(tc.config) + 
admissionPlugin.(*runOnceDuration).nsLister = fakeNamespaceLister(tc.projectAnnotations) + pod := tc.pod + attrs := admission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), "default", "test", kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, nil) + if err := admissionPlugin.(admission.MutationInterface).Admit(context.TODO(), attrs, nil); err != nil { + t.Errorf("%s: unexpected mutating admission error: %v", tc.name, err) + continue + } + + switch { + case tc.expectedActiveDeadlineSeconds == nil && pod.Spec.ActiveDeadlineSeconds == nil: + // continue + case tc.expectedActiveDeadlineSeconds == nil && pod.Spec.ActiveDeadlineSeconds != nil: + t.Errorf("%s: expected nil ActiveDeadlineSeconds. Got: %d", tc.name, *pod.Spec.ActiveDeadlineSeconds) + case tc.expectedActiveDeadlineSeconds != nil && pod.Spec.ActiveDeadlineSeconds == nil: + t.Errorf("%s: unexpected nil ActiveDeadlineSeconds.", tc.name) + case *pod.Spec.ActiveDeadlineSeconds != *tc.expectedActiveDeadlineSeconds: + t.Errorf("%s: unexpected active deadline seconds: %d", tc.name, *pod.Spec.ActiveDeadlineSeconds) + } + } +} + +func TestReadConfig(t *testing.T) { + configStr := `apiVersion: autoscaling.openshift.io/v1 +kind: RunOnceDurationConfig +activeDeadlineSecondsOverride: 3600 +` + buf := bytes.NewBufferString(configStr) + config, err := readConfig(buf) + if err != nil { + t.Fatalf("unexpected error reading config: %v", err) + } + if config.ActiveDeadlineSecondsOverride == nil { + t.Fatalf("nil value for ActiveDeadlineSecondsOverride") + } + if *config.ActiveDeadlineSecondsOverride != 3600 { + t.Errorf("unexpected value for ActiveDeadlineSecondsOverride: %d", *config.ActiveDeadlineSecondsOverride) + } +} + +func TestInt64MinP(t *testing.T) { + ten := int64(10) + twenty := int64(20) + tests := []struct { + a, b, expected *int64 + }{ + { + a: &ten, + b: nil, + expected: &ten, + }, + { + a: nil, + b: &ten, + expected: &ten, + }, + { + a: &ten, + b: &twenty, + expected: &ten, + }, + { + a: nil, + b: nil, + expected: nil, + }, + } + + for _, test := range tests { + actual := int64MinP(test.a, test.b) + switch { + case actual == nil && test.expected != nil, + test.expected == nil && actual != nil: + t.Errorf("unexpected %v for %#v", actual, test) + continue + case actual == nil: + continue + case *actual != *test.expected: + t.Errorf("unexpected: %v for %#v", actual, test) + } + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/runonceduration/doc.go b/openshift-kube-apiserver/admission/autoscaling/runonceduration/doc.go new file mode 100644 index 0000000000000..15a3a3ae39143 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/runonceduration/doc.go @@ -0,0 +1,21 @@ +/* +Package runonceduration contains the RunOnceDuration admission control plugin. +The plugin allows overriding the ActiveDeadlineSeconds for pods that have a +RestartPolicy of RestartPolicyNever or RestartPolicyOnFailure (run once). If an +annotation exists in the pod's namespace of: + + openshift.io/active-deadline-seconds-override + +the value of the annotation will take precedence over the globally configured +value in the plugin's configuration. 
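+ +For example (illustrative only), a project could cap its run-once pods at 30 +minutes by annotating the namespace: + + apiVersion: v1 + kind: Namespace + metadata: + name: example + annotations: + openshift.io/active-deadline-seconds-override: "1800"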
+ +# Configuration + +The plugin is configured via a RunOnceDurationConfig object: + + apiVersion: autoscaling.openshift.io/v1 + kind: RunOnceDurationConfig + activeDeadlineSecondsOverride: 3600 +*/ +package runonceduration diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount.go b/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount.go new file mode 100644 index 0000000000000..c35a7a1fad20a --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount.go @@ -0,0 +1,109 @@ +package apirequestcount + +import ( + "context" + "fmt" + "io" + "strings" + + apiv1 "github.com/openshift/api/apiserver/v1" + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateAPIRequestCount" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return newValidateAPIRequestCount() + }) +} + +func newValidateAPIRequestCount() (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + apiv1.GroupVersion.WithResource("apirequestcounts").GroupResource(): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + apiv1.GroupVersion.WithKind("APIRequestCount"): apiRequestCountV1{}, + }) +} + +type apiRequestCountV1 struct { +} + +func toAPIRequestCountV1(uncastObj runtime.Object) (*apiv1.APIRequestCount, field.ErrorList) { + obj, ok := uncastObj.(*apiv1.APIRequestCount) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"APIRequestCount"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"apiserver.openshift.io/v1"}), + } + } + + return obj, nil +} + +func (a apiRequestCountV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toAPIRequestCountV1(uncastObj) + if len(errs) > 0 { + return errs + } + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, requireNameGVR, field.NewPath("metadata"))...) + return errs +} + +// requireNameGVR is a name validation function that requires the name to be of the form 'resource.version.group'. +func requireNameGVR(name string, _ bool) []string { + if _, err := NameToResource(name); err != nil { + return []string{err.Error()} + } + return nil +} + +// NameToResource parses a name of the form 'resource.version.group'. 
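+// For example (illustrative): "apirequestcounts.v1.apiserver.openshift.io" yields the resource "apirequestcounts", version "v1", and group "apiserver.openshift.io"; "resource.version" leaves the group empty, and a bare "resource" is rejected.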
+func NameToResource(name string) (schema.GroupVersionResource, error) { + segments := strings.SplitN(name, ".", 3) + result := schema.GroupVersionResource{Resource: segments[0]} + switch len(segments) { + case 3: + result.Group = segments[2] + fallthrough + case 2: + result.Version = segments[1] + default: + return schema.GroupVersionResource{}, fmt.Errorf("apirequestcount %s: name must be of the form 'resource.version.group'", name) + } + return result, nil +} + +func (a apiRequestCountV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAPIRequestCountV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAPIRequestCountV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + return errs +} + +func (a apiRequestCountV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAPIRequestCountV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAPIRequestCountV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount_test.go new file mode 100644 index 0000000000000..f69dd194fcc30 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount_test.go @@ -0,0 +1,35 @@ +package apirequestcount + +import ( + "context" + "testing" + + apiv1 "github.com/openshift/api/apiserver/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestApiRequestCountV1_ValidateCreate(t *testing.T) { + testCases := []struct { + name string + errExpected bool + }{ + {"nogood", true}, + {"resource.version", false}, + {"resource.groupnonsense", false}, + {"resource.version.group", false}, + {"resource.version.group.with.dots", false}, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + errs := apiRequestCountV1{}.ValidateCreate(context.TODO(), &apiv1.APIRequestCount{ObjectMeta: metav1.ObjectMeta{Name: tc.name}}) + if tc.errExpected != (len(errs) != 0) { + s := "did not expect " + if tc.errExpected { + s = "expected " + } + t.Errorf("%serrors, but got %d errors: %v", s, len(errs), errs) + } + }) + } + +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver.go b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver.go new file mode 100644 index 0000000000000..337cbb686a2a5 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver.go @@ -0,0 +1,259 @@ +package apiserver + +import ( + "context" + "fmt" + "regexp" + "strings" + + "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + + configv1 "github.com/openshift/api/config/v1" + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + 
libgocrypto "github.com/openshift/library-go/pkg/crypto" +) + +func toAPIServerV1(uncastObj runtime.Object) (*configv1.APIServer, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + errs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.APIServer) + if !ok { + return nil, append(errs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"APIServer"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type apiserverV1 struct { + infrastructureGetter func() configv1client.InfrastructuresGetter +} + +func (a apiserverV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toAPIServerV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + errs = append(errs, validateAPIServerSpecCreate(obj.Spec)...) + errs = append(errs, a.validateSNINames(obj)...) + + return errs +} + +func (a apiserverV1) validateSNINames(obj *configv1.APIServer) field.ErrorList { + errs := field.ErrorList{} + if len(obj.Spec.ServingCerts.NamedCertificates) == 0 { + return errs + } + + infrastructure, err := a.infrastructureGetter().Infrastructures().Get(context.TODO(), "cluster", metav1.GetOptions{}) + if err != nil { + errs = append(errs, field.InternalError(field.NewPath("metadata"), err)) + // the internal load balancer host is unknown; return here rather than dereference a nil infrastructure below + return errs + } + for i, currSNI := range obj.Spec.ServingCerts.NamedCertificates { + // if names are specified, confirm they do not match the internal load balancer host + // if names are not specified, the cert can still match, but only the operator resolves the secrets down; we gain a lot of safety + // by not allowing an explicit override of these values + for j, currName := range currSNI.Names { + path := field.NewPath("spec").Child("servingCerts").Index(i).Child("names").Index(j) + if currName == infrastructure.Status.APIServerInternalURL { + errs = append(errs, field.Invalid(path, currName, fmt.Sprintf("may not match internal loadbalancer: %q", infrastructure.Status.APIServerInternalURL))) + continue + } + if strings.HasSuffix(currName, ".*") { + withoutSuffix := currName[0 : len(currName)-2] + if strings.HasPrefix(infrastructure.Status.APIServerInternalURL, withoutSuffix) { + errs = append(errs, field.Invalid(path, currName, fmt.Sprintf("may not match internal loadbalancer: %q", infrastructure.Status.APIServerInternalURL))) + } + } + } + } + + return errs +} + +func (a apiserverV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAPIServerV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAPIServerV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateAPIServerSpecUpdate(obj.Spec, oldObj.Spec)...) + errs = append(errs, a.validateSNINames(obj)...) + + return errs +} + +func (apiserverV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAPIServerV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAPIServerV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. 
+ errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateAPIServerStatus(obj.Status)...) + + return errs +} + +func validateAPIServerSpecCreate(spec configv1.APIServerSpec) field.ErrorList { + errs := field.ErrorList{} + specPath := field.NewPath("spec") + + errs = append(errs, validateAdditionalCORSAllowedOrigins(specPath.Child("additionalCORSAllowedOrigins"), spec.AdditionalCORSAllowedOrigins)...) + errs = append(errs, validateTLSSecurityProfile(specPath.Child("tlsSecurityProfile"), spec.TLSSecurityProfile)...) + + return errs +} + +func validateAPIServerSpecUpdate(newSpec, oldSpec configv1.APIServerSpec) field.ErrorList { + errs := field.ErrorList{} + specPath := field.NewPath("spec") + + errs = append(errs, validateAdditionalCORSAllowedOrigins(specPath.Child("additionalCORSAllowedOrigins"), newSpec.AdditionalCORSAllowedOrigins)...) + errs = append(errs, validateTLSSecurityProfile(specPath.Child("tlsSecurityProfile"), newSpec.TLSSecurityProfile)...) + + return errs +} + +func validateAPIServerStatus(status configv1.APIServerStatus) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} + +func validateAdditionalCORSAllowedOrigins(fieldPath *field.Path, cors []string) field.ErrorList { + errs := field.ErrorList{} + + for i, re := range cors { + if _, err := regexp.Compile(re); err != nil { + errs = append(errs, field.Invalid(fieldPath.Index(i), re, fmt.Sprintf("not a valid regular expression: %v", err))) + } + } + + return errs +} + +func validateTLSSecurityProfile(fieldPath *field.Path, profile *configv1.TLSSecurityProfile) field.ErrorList { + errs := field.ErrorList{} + + if profile == nil { + return errs + } + + errs = append(errs, validateTLSSecurityProfileType(fieldPath, profile)...) + + if profile.Type == configv1.TLSProfileCustomType && profile.Custom != nil { + errs = append(errs, validateCipherSuites(fieldPath.Child("custom", "ciphers"), profile.Custom.Ciphers, profile.Custom.MinTLSVersion)...) + errs = append(errs, validateMinTLSVersion(fieldPath.Child("custom", "minTLSVersion"), profile.Custom.MinTLSVersion)...) 
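+ // the predefined Old and Intermediate profiles ship fixed cipher and version values, so only the user-supplied Custom profile needs field-level validation here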
+ } + + return errs +} + +func validateTLSSecurityProfileType(fieldPath *field.Path, profile *configv1.TLSSecurityProfile) field.ErrorList { + const typeProfileMismatchFmt = "type set to %s, but the corresponding field is unset" + typePath := fieldPath.Child("type") + + errs := field.ErrorList{} + + availableTypes := []string{ + string(configv1.TLSProfileOldType), + string(configv1.TLSProfileIntermediateType), + string(configv1.TLSProfileCustomType), + } + + switch profile.Type { + case "": + if profile.Old != nil || profile.Intermediate != nil || profile.Modern != nil || profile.Custom != nil { + errs = append(errs, field.Required(typePath, "one of the profiles is set but 'type' field is empty")) + } + case configv1.TLSProfileOldType: + if profile.Old == nil { + errs = append(errs, field.Required(fieldPath.Child("old"), fmt.Sprintf(typeProfileMismatchFmt, profile.Type))) + } + case configv1.TLSProfileIntermediateType: + if profile.Intermediate == nil { + errs = append(errs, field.Required(fieldPath.Child("intermediate"), fmt.Sprintf(typeProfileMismatchFmt, profile.Type))) + } + case configv1.TLSProfileModernType: + errs = append(errs, field.NotSupported(fieldPath.Child("type"), profile.Type, availableTypes)) + case configv1.TLSProfileCustomType: + if profile.Custom == nil { + errs = append(errs, field.Required(fieldPath.Child("custom"), fmt.Sprintf(typeProfileMismatchFmt, profile.Type))) + } + default: + errs = append(errs, field.Invalid(typePath, profile.Type, fmt.Sprintf("unknown type, valid values are: %v", availableTypes))) + } + + return errs +} + +func validateCipherSuites(fieldPath *field.Path, suites []string, version configv1.TLSProtocolVersion) field.ErrorList { + errs := field.ErrorList{} + + if ianaSuites := libgocrypto.OpenSSLToIANACipherSuites(suites); len(ianaSuites) == 0 { + errs = append(errs, field.Invalid(fieldPath, suites, "no supported cipher suite found")) + } + + // Return an error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or + // ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 to prevent the http2 Server + // configuration from returning an error when http2-required cipher suites aren't + // provided. + // See: go/x/net/http2.ConfigureServer for further information. + if version < configv1.VersionTLS13 && !haveRequiredHTTP2CipherSuites(suites) { + errs = append(errs, field.Invalid(fieldPath, suites, "http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of ECDHE-RSA-AES128-GCM-SHA256 or ECDHE-ECDSA-AES128-GCM-SHA256)")) + } + + return errs +} + +func haveRequiredHTTP2CipherSuites(suites []string) bool { + for _, s := range suites { + switch s { + case "ECDHE-RSA-AES128-GCM-SHA256", + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. 
+ "ECDHE-ECDSA-AES128-GCM-SHA256": + return true + } + } + return false +} + +func validateMinTLSVersion(fieldPath *field.Path, version configv1.TLSProtocolVersion) field.ErrorList { + errs := field.ErrorList{} + + if version == configv1.VersionTLS13 { + return append(errs, field.NotSupported(fieldPath, version, []string{string(configv1.VersionTLS10), string(configv1.VersionTLS11), string(configv1.VersionTLS12)})) + } + + if _, err := libgocrypto.TLSVersion(string(version)); err != nil { + errs = append(errs, field.Invalid(fieldPath, version, err.Error())) + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver_test.go new file mode 100644 index 0000000000000..54c072363c823 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver_test.go @@ -0,0 +1,286 @@ +package apiserver + +import ( + "testing" + + configv1 "github.com/openshift/api/config/v1" + configclientfake "github.com/openshift/client-go/config/clientset/versioned/fake" + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func TestValidateSNINames(t *testing.T) { + expectNoErrors := func(t *testing.T, errs field.ErrorList) { + t.Helper() + if len(errs) > 0 { + t.Fatal(errs) + } + } + + tests := []struct { + name string + + internalName string + apiserver *configv1.APIServer + + validateErrors func(t *testing.T, errs field.ErrorList) + }{ + { + name: "no sni", + internalName: "internal.host.com", + apiserver: &configv1.APIServer{}, + validateErrors: expectNoErrors, + }, + { + name: "allowed sni", + internalName: "internal.host.com", + apiserver: &configv1.APIServer{ + Spec: configv1.APIServerSpec{ + ServingCerts: configv1.APIServerServingCerts{ + NamedCertificates: []configv1.APIServerNamedServingCert{ + { + Names: []string{"external.host.com", "somwhere.else.*"}, + }, + }, + }, + }, + }, + validateErrors: expectNoErrors, + }, + { + name: "directly invalid sni", + internalName: "internal.host.com", + apiserver: &configv1.APIServer{ + Spec: configv1.APIServerSpec{ + ServingCerts: configv1.APIServerServingCerts{ + NamedCertificates: []configv1.APIServerNamedServingCert{ + {Names: []string{"external.host.com", "somwhere.else.*"}}, + {Names: []string{"foo.bar", "internal.host.com"}}, + }, + }, + }, + }, + validateErrors: func(t *testing.T, errs field.ErrorList) { + t.Helper() + if len(errs) != 1 { + t.Fatal(errs) + } + if errs[0].Error() != `spec.servingCerts[1].names[1]: Invalid value: "internal.host.com": may not match internal loadbalancer: "internal.host.com"` { + t.Error(errs[0]) + } + }, + }, + { + name: "wildcard invalid sni", + internalName: "internal.host.com", + apiserver: &configv1.APIServer{ + Spec: configv1.APIServerSpec{ + ServingCerts: configv1.APIServerServingCerts{ + NamedCertificates: []configv1.APIServerNamedServingCert{ + {Names: []string{"internal.*"}}, + }, + }, + }, + }, + validateErrors: func(t *testing.T, errs field.ErrorList) { + t.Helper() + if len(errs) != 1 { + t.Fatal(errs) + } + if errs[0].Error() != `spec.servingCerts[0].names[0]: Invalid value: "internal.*": may not match internal loadbalancer: "internal.host.com"` { + t.Error(errs[0]) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeclient := 
configclientfake.NewSimpleClientset(&configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Status: configv1.InfrastructureStatus{ + APIServerInternalURL: test.internalName, + }, + }) + + instance := apiserverV1{ + infrastructureGetter: func() configv1client.InfrastructuresGetter { + return fakeclient.ConfigV1() + }, + } + test.validateErrors(t, instance.validateSNINames(test.apiserver)) + }) + + } +} + +func Test_validateTLSSecurityProfile(t *testing.T) { + rootFieldPath := field.NewPath("testSpec") + + tests := []struct { + name string + profile *configv1.TLSSecurityProfile + want field.ErrorList + }{ + { + name: "nil profile", + profile: nil, + want: field.ErrorList{}, + }, + { + name: "empty profile", + profile: &configv1.TLSSecurityProfile{}, + want: field.ErrorList{}, + }, + { + name: "type does not match set field", + profile: &configv1.TLSSecurityProfile{ + Type: configv1.TLSProfileIntermediateType, + Modern: &configv1.ModernTLSProfile{}, + }, + want: field.ErrorList{ + field.Required(rootFieldPath.Child("intermediate"), "type set to Intermediate, but the corresponding field is unset"), + }, + }, + { + name: "modern type - currently unsupported", + profile: &configv1.TLSSecurityProfile{ + Type: configv1.TLSProfileModernType, + Modern: &configv1.ModernTLSProfile{}, + }, + want: field.ErrorList{ + field.NotSupported(rootFieldPath.Child("type"), configv1.TLSProfileModernType, + []string{ + string(configv1.TLSProfileOldType), + string(configv1.TLSProfileIntermediateType), + string(configv1.TLSProfileCustomType), + }), + }, + }, + { + name: "unknown type", + profile: &configv1.TLSSecurityProfile{ + Type: "something", + }, + want: field.ErrorList{ + field.Invalid(rootFieldPath.Child("type"), "something", "unknown type, valid values are: [Old Intermediate Custom]"), + }, + }, + { + name: "unknown cipher", + profile: &configv1.TLSSecurityProfile{ + Type: "Custom", + Custom: &configv1.CustomTLSProfile{ + TLSProfileSpec: configv1.TLSProfileSpec{ + Ciphers: []string{ + "UNKNOWN_CIPHER", + }, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(rootFieldPath.Child("custom", "ciphers"), []string{"UNKNOWN_CIPHER"}, "no supported cipher suite found"), + field.Invalid(rootFieldPath.Child("custom", "ciphers"), []string{"UNKNOWN_CIPHER"}, "http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of ECDHE-RSA-AES128-GCM-SHA256 or ECDHE-ECDSA-AES128-GCM-SHA256)"), + }, + }, + { + name: "unknown cipher but a normal cipher", + profile: &configv1.TLSSecurityProfile{ + Type: "Custom", + Custom: &configv1.CustomTLSProfile{ + TLSProfileSpec: configv1.TLSProfileSpec{ + Ciphers: []string{ + "UNKNOWN_CIPHER", "ECDHE-RSA-AES128-GCM-SHA256", + }, + }, + }, + }, + want: field.ErrorList{}, + }, + { + name: "no ciphers in custom profile", + profile: &configv1.TLSSecurityProfile{ + Type: "Custom", + Custom: &configv1.CustomTLSProfile{ + TLSProfileSpec: configv1.TLSProfileSpec{}, + }, + }, + want: field.ErrorList{ + field.Invalid(rootFieldPath.Child("custom", "ciphers"), []string(nil), "no supported cipher suite found"), + field.Invalid(rootFieldPath.Child("custom", "ciphers"), []string(nil), "http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of ECDHE-RSA-AES128-GCM-SHA256 or ECDHE-ECDSA-AES128-GCM-SHA256)"), + }, + }, + { + name: "min tls 1.3 - currently unsupported", + profile: &configv1.TLSSecurityProfile{ + Type: "Custom", + Custom: &configv1.CustomTLSProfile{ + TLSProfileSpec: 
configv1.TLSProfileSpec{ + Ciphers: []string{"ECDHE-ECDSA-CHACHA20-POLY1305"}, + MinTLSVersion: configv1.VersionTLS13, + }, + }, + }, + want: field.ErrorList{ + field.NotSupported(rootFieldPath.Child("custom", "minTLSVersion"), configv1.VersionTLS13, []string{string(configv1.VersionTLS10), string(configv1.VersionTLS11), string(configv1.VersionTLS12)}), + }, + }, + { + name: "custom profile missing required http2 ciphers", + profile: &configv1.TLSSecurityProfile{ + Type: "Custom", + Custom: &configv1.CustomTLSProfile{ + TLSProfileSpec: configv1.TLSProfileSpec{ + Ciphers: []string{ + "ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + }, + MinTLSVersion: configv1.VersionTLS12, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(rootFieldPath.Child("custom", "ciphers"), []string{"ECDSA-AES256-GCM-SHA384", "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-CHACHA20-POLY1305"}, "http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of ECDHE-RSA-AES128-GCM-SHA256 or ECDHE-ECDSA-AES128-GCM-SHA256)"), + }, + }, + { + name: "custom profile with one required http2 ciphers", + profile: &configv1.TLSSecurityProfile{ + Type: "Custom", + Custom: &configv1.CustomTLSProfile{ + TLSProfileSpec: configv1.TLSProfileSpec{ + Ciphers: []string{ + "ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "ECDHE-RSA-AES128-GCM-SHA256", + }, + MinTLSVersion: configv1.VersionTLS12, + }, + }, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := validateTLSSecurityProfile(rootFieldPath, tt.profile) + + if len(tt.want) != len(got) { + t.Errorf("expected %d errors, got %d: %v", len(tt.want), len(got), got) + return + } + + for i, err := range got { + if err.Error() != tt.want[i].Error() { + t.Errorf("expected %v, got %v", tt.want, got) + break + } + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validation_wrapper.go b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validation_wrapper.go new file mode 100644 index 0000000000000..149361cd1e096 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validation_wrapper.go @@ -0,0 +1,72 @@ +package apiserver + +import ( + "fmt" + "io" + + configv1 "github.com/openshift/api/config/v1" + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/rest" +) + +const PluginName = "config.openshift.io/ValidateAPIServer" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return NewValidateAPIServer() + }) +} + +type validateCustomResourceWithClient struct { + admission.ValidationInterface + + infrastructureGetter configv1client.InfrastructuresGetter +} + +func NewValidateAPIServer() (admission.Interface, error) { + ret := &validateCustomResourceWithClient{} + + delegate, err := customresourcevalidation.NewValidator( + 
map[schema.GroupResource]bool{ + configv1.GroupVersion.WithResource("apiservers").GroupResource(): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("APIServer"): apiserverV1{infrastructureGetter: ret.getInfrastructureGetter}, + }) + if err != nil { + return nil, err + } + ret.ValidationInterface = delegate + + return ret, nil +} + +var _ admissionrestconfig.WantsRESTClientConfig = &validateCustomResourceWithClient{} + +func (a *validateCustomResourceWithClient) getInfrastructureGetter() configv1client.InfrastructuresGetter { + return a.infrastructureGetter +} + +func (a *validateCustomResourceWithClient) SetRESTClientConfig(restClientConfig rest.Config) { + var err error + a.infrastructureGetter, err = configv1client.NewForConfig(&restClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } +} + +func (a *validateCustomResourceWithClient) ValidateInitialization() error { + if a.infrastructureGetter == nil { + return fmt.Errorf(PluginName + " needs an infrastructureGetter") + } + + return nil +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go b/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go new file mode 100644 index 0000000000000..0f1f379d576cf --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go @@ -0,0 +1,59 @@ +package customresourcevalidation + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + + apiv1 "github.com/openshift/api/apiserver/v1" + authorizationv1 "github.com/openshift/api/authorization/v1" + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" + quotav1 "github.com/openshift/api/quota/v1" + routev1 "github.com/openshift/api/route/v1" + securityv1 "github.com/openshift/api/security/v1" +) + +// unstructuredUnpackingAttributes tries to convert to a real object in the config scheme +type unstructuredUnpackingAttributes struct { + admission.Attributes +} + +func (a *unstructuredUnpackingAttributes) GetObject() runtime.Object { + return toBestObjectPossible(a.Attributes.GetObject()) +} + +func (a *unstructuredUnpackingAttributes) GetOldObject() runtime.Object { + return toBestObjectPossible(a.Attributes.GetOldObject()) +} + +// toBestObjectPossible tries to convert to a real object in the supported scheme +func toBestObjectPossible(orig runtime.Object) runtime.Object { + unstructuredOrig, ok := orig.(runtime.Unstructured) + if !ok { + return orig + } + + targetObj, err := supportedObjectsScheme.New(unstructuredOrig.GetObjectKind().GroupVersionKind()) + if err != nil { + utilruntime.HandleError(err) + return unstructuredOrig + } + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredOrig.UnstructuredContent(), targetObj); err != nil { + utilruntime.HandleError(err) + return unstructuredOrig + } + return targetObj +} + +var supportedObjectsScheme = runtime.NewScheme() + +func init() { + utilruntime.Must(configv1.Install(supportedObjectsScheme)) + utilruntime.Must(operatorv1.Install(supportedObjectsScheme)) + utilruntime.Must(quotav1.Install(supportedObjectsScheme)) + utilruntime.Must(securityv1.Install(supportedObjectsScheme)) + utilruntime.Must(authorizationv1.Install(supportedObjectsScheme)) + utilruntime.Must(apiv1.Install(supportedObjectsScheme)) + utilruntime.Must(routev1.Install(supportedObjectsScheme)) +} diff --git 
a/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication.go b/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication.go new file mode 100644 index 0000000000000..26506e47019cf --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication.go @@ -0,0 +1,134 @@ +package authentication + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateAuthentication" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return crvalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.GroupVersion.WithResource("authentications").GroupResource(): true, + }, + map[schema.GroupVersionKind]crvalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Authentication"): authenticationV1{}, + }) + }) +} + +func toAuthenticationV1(uncastObj runtime.Object) (*configv1.Authentication, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + obj, ok := uncastObj.(*configv1.Authentication) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Authentication"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"}), + } + } + + return obj, nil +} + +type authenticationV1 struct{} + +func (authenticationV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toAuthenticationV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, crvalidation.RequireNameCluster, field.NewPath("metadata"))...) + errs = append(errs, validateAuthenticationSpecCreate(obj.Spec)...) + + return errs +} + +func (authenticationV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAuthenticationV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAuthenticationV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateAuthenticationSpecUpdate(obj.Spec, oldObj.Spec)...) + + return errs +} + +func (authenticationV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAuthenticationV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAuthenticationV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateAuthenticationStatus(obj.Status)...) 
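	// Status updates take a separate path from spec updates: the method above only
	// revalidates metadata and status, so a pre-existing invalid spec cannot block a
	// status write. A minimal sketch of that behavior (illustrative values only):
	//
	//	obj := oldObj.DeepCopy()
	//	obj.Status.IntegratedOAuthMetadata = configv1.ConfigMapNameReference{Name: "oauth-meta"}
	//	errs := authenticationV1{}.ValidateStatusUpdate(ctx, obj, oldObj)
	//	// errs stays empty even if obj.Spec would fail validateAuthenticationSpec.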
+ + return errs +} + +func validateAuthenticationSpecCreate(spec configv1.AuthenticationSpec) field.ErrorList { + return validateAuthenticationSpec(spec) +} + +func validateAuthenticationSpecUpdate(newspec, oldspec configv1.AuthenticationSpec) field.ErrorList { + return validateAuthenticationSpec(newspec) +} + +func validateAuthenticationSpec(spec configv1.AuthenticationSpec) field.ErrorList { + errs := field.ErrorList{} + specField := field.NewPath("spec") + + if spec.WebhookTokenAuthenticator != nil { + switch spec.Type { + case configv1.AuthenticationTypeNone, configv1.AuthenticationTypeIntegratedOAuth, "": + // validate the secret name in WebhookTokenAuthenticator + errs = append( + errs, + crvalidation.ValidateSecretReference( + specField.Child("webhookTokenAuthenticator").Child("kubeConfig"), + spec.WebhookTokenAuthenticator.KubeConfig, + false, + )..., + ) + default: + errs = append(errs, field.Invalid(specField.Child("webhookTokenAuthenticator"), + spec.WebhookTokenAuthenticator, fmt.Sprintf("this field cannot be set with the %q .spec.type", spec.Type), + )) + } + + } + + errs = append(errs, crvalidation.ValidateConfigMapReference(specField.Child("oauthMetadata"), spec.OAuthMetadata, false)...) + + return errs +} + +func validateAuthenticationStatus(status configv1.AuthenticationStatus) field.ErrorList { + return crvalidation.ValidateConfigMapReference(field.NewPath("status", "integratedOAuthMetadata"), status.IntegratedOAuthMetadata, false) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication_test.go new file mode 100644 index 0000000000000..d93f3f67f6fe9 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication_test.go @@ -0,0 +1,179 @@ +package authentication + +import ( + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func TestFailValidateAuthenticationSpec(t *testing.T) { + errorCases := map[string]struct { + spec configv1.AuthenticationSpec + errorType field.ErrorType + errorField string + }{ + "invalid metadata ref": { + spec: configv1.AuthenticationSpec{ + Type: "", + OAuthMetadata: configv1.ConfigMapNameReference{ + Name: "../shadow", + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.oauthMetadata.name", + }, + "invalid webhook ref": { + spec: configv1.AuthenticationSpec{ + WebhookTokenAuthenticator: &configv1.WebhookTokenAuthenticator{ + KubeConfig: configv1.SecretNameReference{Name: "this+that"}, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.webhookTokenAuthenticator.kubeConfig.name", + }, + "valid webhook ref": { + spec: configv1.AuthenticationSpec{ + WebhookTokenAuthenticator: &configv1.WebhookTokenAuthenticator{ + KubeConfig: configv1.SecretNameReference{Name: "this"}, + }, + }, + }, + "invalid webhook ref for a Type": { + spec: configv1.AuthenticationSpec{ + Type: "OIDC", + WebhookTokenAuthenticator: &configv1.WebhookTokenAuthenticator{ + KubeConfig: configv1.SecretNameReference{Name: "this"}, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.webhookTokenAuthenticator", + }, + } + + for tcName, tc := range errorCases { + errs := validateAuthenticationSpec(tc.spec) + if (len(errs) > 0) != (len(tc.errorType) != 0) { + t.Errorf("'%s': expected failure: %t, got: %t", tcName, len(tc.errorType) != 0, len(errs) > 0) + } + + 
for _, e := range errs { + if e.Type != tc.errorType { + t.Errorf("'%s': expected errors of type '%s', got %v:", tcName, tc.errorType, e) + } + + if e.Field != tc.errorField { + t.Errorf("'%s': expected errors in field '%s', got %v:", tcName, tc.errorField, e) + } + } + } +} + +func TestSucceedValidateAuthenticationSpec(t *testing.T) { + successCases := map[string]configv1.AuthenticationSpec{ + "integrated oauth authn type": { + Type: "IntegratedOAuth", + }, + "_none_ authn type": { + Type: "None", + }, + "empty authn type": { + Type: "", + }, + "integrated oauth + oauth metadata": { + OAuthMetadata: configv1.ConfigMapNameReference{ + Name: "configmapwithmetadata", + }, + }, + "webhook set": { + WebhookTokenAuthenticators: []configv1.DeprecatedWebhookTokenAuthenticator{ + {KubeConfig: configv1.SecretNameReference{Name: "wheniwaslittleiwantedtobecomeawebhook"}}, + }, + }, + "some webhooks": { + WebhookTokenAuthenticators: []configv1.DeprecatedWebhookTokenAuthenticator{ + {KubeConfig: configv1.SecretNameReference{Name: "whatacoolnameforasecret"}}, + {KubeConfig: configv1.SecretNameReference{Name: "whatacoolnameforasecret2"}}, + {KubeConfig: configv1.SecretNameReference{Name: "thisalsoisacoolname"}}, + {KubeConfig: configv1.SecretNameReference{Name: "letsnotoverdoit"}}, + }, + }, + "all fields set": { + Type: "IntegratedOAuth", + OAuthMetadata: configv1.ConfigMapNameReference{ + Name: "suchname", + }, + WebhookTokenAuthenticators: []configv1.DeprecatedWebhookTokenAuthenticator{ + {KubeConfig: configv1.SecretNameReference{Name: "thisisawebhook"}}, + {KubeConfig: configv1.SecretNameReference{Name: "thisisawebhook2"}}, + {KubeConfig: configv1.SecretNameReference{Name: "thisisawebhook33"}}, + }, + }, + } + + for tcName, s := range successCases { + errs := validateAuthenticationSpec(s) + if len(errs) != 0 { + t.Errorf("'%s': expected success, but failed: %v", tcName, errs.ToAggregate().Error()) + } + } +} + +func TestFailValidateAuthenticationStatus(t *testing.T) { + errorCases := map[string]struct { + status configv1.AuthenticationStatus + errorType field.ErrorType + errorField string + }{ + "wrong reference name": { + status: configv1.AuthenticationStatus{ + IntegratedOAuthMetadata: configv1.ConfigMapNameReference{ + Name: "something_wrong", + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "status.integratedOAuthMetadata.name", + }, + } + + for tcName, tc := range errorCases { + errs := validateAuthenticationStatus(tc.status) + if len(errs) == 0 { + t.Errorf("'%s': should have failed but did not", tcName) + } + + for _, e := range errs { + if e.Type != tc.errorType { + t.Errorf("'%s': expected errors of type '%s', got %v:", tcName, tc.errorType, e) + } + + if e.Field != tc.errorField { + t.Errorf("'%s': expected errors in field '%s', got %v:", tcName, tc.errorField, e) + } + } + } +} + +func TestSucceedValidateAuthenticationStatus(t *testing.T) { + successCases := map[string]configv1.AuthenticationStatus{ + "basic case": { + IntegratedOAuthMetadata: configv1.ConfigMapNameReference{ + Name: "hey-there", + }, + }, + "empty reference": { + IntegratedOAuthMetadata: configv1.ConfigMapNameReference{ + Name: "", + }, + }, + "empty status": {}, + } + + for tcName, s := range successCases { + errs := validateAuthenticationStatus(s) + if len(errs) != 0 { + t.Errorf("'%s': expected success, but failed: %v", tcName, errs.ToAggregate().Error()) + } + } + +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validate_crq.go 
b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validate_crq.go new file mode 100644 index 0000000000000..8cdfd33de381f --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validate_crq.go @@ -0,0 +1,84 @@ +package clusterresourcequota + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + quotav1 "github.com/openshift/api/quota/v1" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + quotavalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation" +) + +const PluginName = "quota.openshift.io/ValidateClusterResourceQuota" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + {Group: quotav1.GroupName, Resource: "clusterresourcequotas"}: true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + quotav1.GroupVersion.WithKind("ClusterResourceQuota"): clusterResourceQuotaV1{}, + }) + }) +} + +func toClusterResourceQuota(uncastObj runtime.Object) (*quotav1.ClusterResourceQuota, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*quotav1.ClusterResourceQuota) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"ClusterResourceQuota"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{quotav1.GroupVersion.String()})) + } + + return obj, nil +} + +type clusterResourceQuotaV1 struct { +} + +func (clusterResourceQuotaV1) ValidateCreate(_ context.Context, obj runtime.Object) field.ErrorList { + clusterResourceQuotaObj, errs := toClusterResourceQuota(obj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&clusterResourceQuotaObj.ObjectMeta, false, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, quotavalidation.ValidateClusterResourceQuota(clusterResourceQuotaObj)...) + + return errs +} + +func (clusterResourceQuotaV1) ValidateUpdate(_ context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + clusterResourceQuotaObj, errs := toClusterResourceQuota(obj) + if len(errs) > 0 { + return errs + } + clusterResourceQuotaOldObj, errs := toClusterResourceQuota(oldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&clusterResourceQuotaObj.ObjectMeta, false, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, quotavalidation.ValidateClusterResourceQuotaUpdate(clusterResourceQuotaObj, clusterResourceQuotaOldObj)...) 
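	// For orientation, a minimal object that clears both the metadata and the quota
	// validation above (a sketch; names and values are illustrative):
	//
	//	crq := &quotav1.ClusterResourceQuota{
	//		ObjectMeta: metav1.ObjectMeta{Name: "team-a"},
	//		Spec: quotav1.ClusterResourceQuotaSpec{
	//			Selector: quotav1.ClusterResourceQuotaSelector{
	//				AnnotationSelector: map[string]string{"openshift.io/requester": "team-a"},
	//			},
	//			Quota: corev1.ResourceQuotaSpec{
	//				Hard: corev1.ResourceList{corev1.ResourcePods: resource.MustParse("10")},
	//			},
	//		},
	//	}
	//
	// Either a label selector or an annotation selector satisfies the "must restrict
	// the selected projects" requirement enforced in the validation package below.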
+ + return errs +} + +func (c clusterResourceQuotaV1) ValidateStatusUpdate(ctx context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + return c.ValidateUpdate(ctx, obj, oldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation.go b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation.go new file mode 100644 index 0000000000000..7bc1767497bb5 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation.go @@ -0,0 +1,68 @@ +package validation + +import ( + "sort" + + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/core/validation" + + quotav1 "github.com/openshift/api/quota/v1" +) + +func ValidateClusterResourceQuota(clusterquota *quotav1.ClusterResourceQuota) field.ErrorList { + allErrs := validation.ValidateObjectMeta(&clusterquota.ObjectMeta, false, validation.ValidateResourceQuotaName, field.NewPath("metadata")) + + hasSelectionCriteria := (clusterquota.Spec.Selector.LabelSelector != nil && len(clusterquota.Spec.Selector.LabelSelector.MatchLabels)+len(clusterquota.Spec.Selector.LabelSelector.MatchExpressions) > 0) || + (len(clusterquota.Spec.Selector.AnnotationSelector) > 0) + + if !hasSelectionCriteria { + allErrs = append(allErrs, field.Required(field.NewPath("spec", "selector"), "must restrict the selected projects")) + } + if clusterquota.Spec.Selector.LabelSelector != nil { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(clusterquota.Spec.Selector.LabelSelector, unversionedvalidation.LabelSelectorValidationOptions{}, field.NewPath("spec", "selector", "labels"))...) + if len(clusterquota.Spec.Selector.LabelSelector.MatchLabels)+len(clusterquota.Spec.Selector.LabelSelector.MatchExpressions) == 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "selector", "labels"), clusterquota.Spec.Selector.LabelSelector, "must restrict the selected projects")) + } + } + if clusterquota.Spec.Selector.AnnotationSelector != nil { + allErrs = append(allErrs, validation.ValidateAnnotations(clusterquota.Spec.Selector.AnnotationSelector, field.NewPath("spec", "selector", "annotations"))...) + } + + internalQuota := &core.ResourceQuotaSpec{} + if err := v1.Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(&clusterquota.Spec.Quota, internalQuota, nil); err != nil { + panic(err) + } + internalStatus := &core.ResourceQuotaStatus{} + if err := v1.Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(&clusterquota.Status.Total, internalStatus, nil); err != nil { + panic(err) + } + + allErrs = append(allErrs, validation.ValidateResourceQuotaSpec(internalQuota, field.NewPath("spec", "quota"))...) + allErrs = append(allErrs, validation.ValidateResourceQuotaStatus(internalStatus, field.NewPath("status", "overall"))...) 
+ + orderedNamespaces := clusterquota.Status.Namespaces.DeepCopy() + sort.Slice(orderedNamespaces, func(i, j int) bool { + return orderedNamespaces[i].Namespace < orderedNamespaces[j].Namespace + }) + + for _, namespace := range orderedNamespaces { + fldPath := field.NewPath("status", "namespaces").Key(namespace.Namespace) + for k, v := range namespace.Status.Used { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, validation.ValidateResourceQuotaResourceName(core.ResourceName(k), resPath)...) + allErrs = append(allErrs, validation.ValidateResourceQuantityValue(core.ResourceName(k), v, resPath)...) + } + } + + return allErrs +} + +func ValidateClusterResourceQuotaUpdate(clusterquota, oldClusterResourceQuota *quotav1.ClusterResourceQuota) field.ErrorList { + allErrs := validation.ValidateObjectMetaUpdate(&clusterquota.ObjectMeta, &oldClusterResourceQuota.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateClusterResourceQuota(clusterquota)...) + + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation_test.go new file mode 100644 index 0000000000000..c1dbf76aecf46 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation_test.go @@ -0,0 +1,173 @@ +package validation + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core" + corekubev1 "k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/core/validation" + + quotav1 "github.com/openshift/api/quota/v1" +) + +func spec(scopes ...corev1.ResourceQuotaScope) corev1.ResourceQuotaSpec { + return corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100"), + corev1.ResourceMemory: resource.MustParse("10000"), + corev1.ResourceRequestsCPU: resource.MustParse("100"), + corev1.ResourceRequestsMemory: resource.MustParse("10000"), + corev1.ResourceLimitsCPU: resource.MustParse("100"), + corev1.ResourceLimitsMemory: resource.MustParse("10000"), + corev1.ResourcePods: resource.MustParse("10"), + corev1.ResourceServices: resource.MustParse("0"), + corev1.ResourceReplicationControllers: resource.MustParse("10"), + corev1.ResourceQuotas: resource.MustParse("10"), + corev1.ResourceConfigMaps: resource.MustParse("10"), + corev1.ResourceSecrets: resource.MustParse("10"), + }, + Scopes: scopes, + } +} + +func scopeableSpec(scopes ...corev1.ResourceQuotaScope) corev1.ResourceQuotaSpec { + return corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100"), + corev1.ResourceMemory: resource.MustParse("10000"), + corev1.ResourceRequestsCPU: resource.MustParse("100"), + corev1.ResourceRequestsMemory: resource.MustParse("10000"), + corev1.ResourceLimitsCPU: resource.MustParse("100"), + corev1.ResourceLimitsMemory: resource.MustParse("10000"), + }, + Scopes: scopes, + } +} + +func TestValidationClusterQuota(t *testing.T) { + // storage is not yet supported as a quota tracked resource + invalidQuotaResourceSpec := corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10"), + }, + } + validLabels := map[string]string{"a": "b"} + + errs := ValidateClusterResourceQuota( 
+ &quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{LabelSelector: &metav1.LabelSelector{MatchLabels: validLabels}}, + Quota: spec(), + }, + }, + ) + if len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + + errorCases := map[string]struct { + A quotav1.ClusterResourceQuota + T field.ErrorType + F string + }{ + "non-zero-length namespace": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Namespace: "bad", Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{LabelSelector: &metav1.LabelSelector{MatchLabels: validLabels}}, + Quota: spec(), + }, + }, + T: field.ErrorTypeForbidden, + F: "metadata.namespace", + }, + "missing label selector": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Quota: spec(), + }, + }, + T: field.ErrorTypeRequired, + F: "spec.selector", + }, + "ok scope": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Quota: scopeableSpec(corev1.ResourceQuotaScopeNotTerminating), + }, + }, + T: field.ErrorTypeRequired, + F: "spec.selector", + }, + "bad scope": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{LabelSelector: &metav1.LabelSelector{MatchLabels: validLabels}}, + Quota: spec(corev1.ResourceQuotaScopeNotTerminating), + }, + }, + T: field.ErrorTypeInvalid, + F: "spec.quota.scopes", + }, + "bad quota spec": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{LabelSelector: &metav1.LabelSelector{MatchLabels: validLabels}}, + Quota: invalidQuotaResourceSpec, + }, + }, + T: field.ErrorTypeInvalid, + F: "spec.quota.hard[storage]", + }, + } + for k, v := range errorCases { + errs := ValidateClusterResourceQuota(&v.A) + if len(errs) == 0 { + t.Errorf("expected failure %s for %v", k, v.A) + continue + } + for i := range errs { + if errs[i].Type != v.T { + t.Errorf("%s: expected errors to have type %s: %v", k, v.T, errs[i]) + } + if errs[i].Field != v.F { + t.Errorf("%s: expected errors to have field %s: %v", k, v.F, errs[i]) + } + } + } +} + +func TestValidationQuota(t *testing.T) { + tests := map[string]struct { + A corev1.ResourceQuota + T field.ErrorType + F string + }{ + "scope": { + A: corev1.ResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "good"}, + Spec: scopeableSpec(corev1.ResourceQuotaScopeNotTerminating), + }, + }, + } + for k, v := range tests { + internal := core.ResourceQuota{} + if err := corekubev1.Convert_v1_ResourceQuota_To_core_ResourceQuota(&v.A, &internal, nil); err != nil { + panic(err) + } + errs := validation.ValidateResourceQuota(&internal) + if len(errs) != 0 { + t.Errorf("%s: %v", k, errs) + continue + } + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource.go b/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource.go new file mode 100644 index 0000000000000..f637e95cece3a --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource.go @@ -0,0 +1,54 @@ +package 
config + +import ( + "context" + "fmt" + "io" + + "k8s.io/apiserver/pkg/admission" +) + +const PluginName = "config.openshift.io/DenyDeleteClusterConfiguration" + +// Register registers an admission plugin factory whose plugin prevents the deletion of cluster configuration resources. +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return newAdmissionPlugin(), nil + }) +} + +var _ admission.ValidationInterface = &admissionPlugin{} + +type admissionPlugin struct { + *admission.Handler +} + +func newAdmissionPlugin() *admissionPlugin { + return &admissionPlugin{Handler: admission.NewHandler(admission.Delete)} +} + +// Validate returns an error if there is an attempt to delete a cluster configuration resource. +func (p *admissionPlugin) Validate(ctx context.Context, attributes admission.Attributes, _ admission.ObjectInterfaces) error { + if len(attributes.GetSubresource()) > 0 { + return nil + } + if attributes.GetResource().Group != "config.openshift.io" { + return nil + } + // clusteroperators can be deleted so that we can force status refreshes and change over time. + // clusterversions not named `version` can be deleted (none are expected to exist). + // other config.openshift.io resources not named `cluster` can be deleted (none are expected to exist). + switch attributes.GetResource().Resource { + case "clusteroperators": + return nil + case "clusterversions": + if attributes.GetName() != "version" { + return nil + } + default: + if attributes.GetName() != "cluster" { + return nil + } + } + return admission.NewForbidden(attributes, fmt.Errorf("deleting required %s.%s resource, named %s, is not allowed", attributes.GetResource().Resource, attributes.GetResource().Group, attributes.GetName())) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource_test.go new file mode 100644 index 0000000000000..70d289f5f26df --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource_test.go @@ -0,0 +1,73 @@ +package config + +import ( + "context" + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +func TestAdmissionPlugin_Validate(t *testing.T) { + testCases := []struct { + tcName string + group string + resource string + name string + denyDelete bool + }{ + { + tcName: "NotWhiteListedResourceNamedCluster", + group: "config.openshift.io", + resource: "notWhitelisted", + name: "cluster", + denyDelete: true, + }, + { + tcName: "NotWhiteListedResourceNotNamedCluster", + group: "config.openshift.io", + resource: "notWhitelisted", + name: "notCluster", + denyDelete: false, + }, + { + tcName: "ClusterVersionVersion", + group: "config.openshift.io", + resource: "clusterversions", + name: "version", + denyDelete: true, + }, + { + tcName: "ClusterVersionNotVersion", + group: "config.openshift.io", + resource: "clusterversions", + name: "instance", + denyDelete: false, + }, + { + tcName: "ClusterOperator", + group: "config.openshift.io", + resource: "clusteroperators", + name: "instance", + denyDelete: false, + }, + { + tcName: "OtherGroup", + group: "not.config.openshift.io", + resource: "notWhitelisted", + name: "cluster", + denyDelete: false, + }, + } + for _, tc := range testCases { + t.Run(tc.tcName, func(t *testing.T) { + err := 
newAdmissionPlugin().Validate(context.TODO(), admission.NewAttributesRecord( + nil, nil, schema.GroupVersionKind{}, "", + tc.name, schema.GroupVersionResource{Group: tc.group, Resource: tc.resource}, + "", admission.Delete, nil, false, nil), nil) + if tc.denyDelete != (err != nil) { + t.Error(tc.denyDelete, err) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/console/validate_console.go b/openshift-kube-apiserver/admission/customresourcevalidation/console/validate_console.go new file mode 100644 index 0000000000000..8f60bbe73c128 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/console/validate_console.go @@ -0,0 +1,119 @@ +package console + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateConsole" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.GroupVersion.WithResource("consoles").GroupResource(): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Console"): consoleV1{}, + }) + }) +} + +func toConsoleV1(uncastObj runtime.Object) (*configv1.Console, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + errs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Console) + if !ok { + return nil, append(errs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Console"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type consoleV1 struct{} + +func (consoleV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toConsoleV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + errs = append(errs, validateConsoleSpecCreate(obj.Spec)...) + + return errs +} + +func (consoleV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toConsoleV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toConsoleV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateConsoleSpecUpdate(obj.Spec, oldObj.Spec)...) + + return errs +} + +func (consoleV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toConsoleV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toConsoleV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. 
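	// The create path above leans on customresourcevalidation.RequireNameCluster,
	// which pins this singleton config resource to the name "cluster". Sketch of
	// the resulting behavior (illustrative, not an additional test):
	//
	//	bad := &configv1.Console{ObjectMeta: metav1.ObjectMeta{Name: "not-cluster"}}
	//	errs := consoleV1{}.ValidateCreate(ctx, bad)
	//	// errs carries an error on metadata.name; only "cluster" is accepted.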
+ errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateConsoleStatus(obj.Status)...) + + return errs +} + +func validateConsoleSpecCreate(spec configv1.ConsoleSpec) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} + +func validateConsoleSpecUpdate(newSpec, oldSpec configv1.ConsoleSpec) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} + +func validateConsoleStatus(status configv1.ConsoleStatus) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go new file mode 100644 index 0000000000000..76bdd704ec165 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go @@ -0,0 +1,92 @@ +package customresourcevalidationregistration + +import ( + "k8s.io/apiserver/pkg/admission" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/apiserver" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/authentication" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/config" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/console" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/dns" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/features" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/image" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/network" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/node" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/oauth" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/operator" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/project" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/route" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/scheduler" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints" +) + +// AllCustomResourceValidators are the names of all custom resource validators that should be registered +var AllCustomResourceValidators = []string{ + apiserver.PluginName, + authentication.PluginName, + features.PluginName, + console.PluginName, + dns.PluginName, + image.PluginName, + oauth.PluginName, + project.PluginName, + config.PluginName, + operator.PluginName, + scheduler.PluginName, + clusterresourcequota.PluginName, + securitycontextconstraints.PluginName, + rolebindingrestriction.PluginName, + network.PluginName, + 
apirequestcount.PluginName, + node.PluginName, + route.DefaultingPluginName, + route.PluginName, + + // the kubecontrollermanager operator resource has to exist in order to run deployments to deploy admission webhooks. + kubecontrollermanager.PluginName, + + // this one is special because we don't work without it. + securitycontextconstraints.DefaultingPluginName, +} + +func RegisterCustomResourceValidation(plugins *admission.Plugins) { + apiserver.Register(plugins) + authentication.Register(plugins) + features.Register(plugins) + console.Register(plugins) + dns.Register(plugins) + image.Register(plugins) + oauth.Register(plugins) + project.Register(plugins) + config.Register(plugins) + operator.Register(plugins) + scheduler.Register(plugins) + kubecontrollermanager.Register(plugins) + + // This plugin validates the quota.openshift.io/v1 ClusterResourceQuota resources. + // NOTE: This is only allowed because it is required to get a running control plane operator. + clusterresourcequota.Register(plugins) + // This plugin validates the security.openshift.io/v1 SecurityContextConstraints resources. + securitycontextconstraints.Register(plugins) + // This plugin validates the authorization.openshift.io/v1 RoleBindingRestriction resources. + rolebindingrestriction.Register(plugins) + // This plugin validates the network.config.openshift.io object for service node port range changes + network.Register(plugins) + // This plugin validates the apiserver.openshift.io/v1 APIRequestCount resources. + apirequestcount.Register(plugins) + // This plugin validates config.openshift.io/v1/node objects + node.Register(plugins) + + // this one is special because we don't work without it. + securitycontextconstraints.RegisterDefaulting(plugins) + + // Requests to route.openshift.io/v1 should only go through kube-apiserver admission if + // served via CRD. Most OpenShift flavors (including vanilla) will continue to do validation + // and defaulting inside openshift-apiserver. + route.Register(plugins) + route.RegisterDefaulting(plugins) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator.go b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator.go new file mode 100644 index 0000000000000..94f763ea2ca30 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator.go @@ -0,0 +1,101 @@ +package customresourcevalidation + +import ( + "context" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" +) + +// ObjectValidator validates a given resource across create, +// update and status update ops. +type ObjectValidator interface { + // TODO: add router validation logic with ctx, remove this todo once added + ValidateCreate(ctx context.Context, obj runtime.Object) field.ErrorList + ValidateUpdate(ctx context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList + ValidateStatusUpdate(ctx context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList +} + +// validateCustomResource is an implementation of admission.ValidationInterface. +// It checks incoming create, update and status-update requests for the configured +// resources against the registered per-kind ObjectValidator. 
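// Dispatch sketch (illustrative; the attrs construction mirrors the tests further
// below): a create of a watched kind is routed to that kind's ValidateCreate, and
// any field errors come back wrapped as an apierrors.NewInvalid admission failure.
//
//	attrs := admission.NewAttributesRecord(obj, nil, gvk, "", "cluster", gvr,
//		"", admission.Create, nil, false, nil)
//	err := validator.Validate(ctx, attrs, nil) // nil iff ValidateCreate returned no errors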
+type validateCustomResource struct { + *admission.Handler + + resources map[schema.GroupResource]bool + validators map[schema.GroupVersionKind]ObjectValidator +} + +func NewValidator(resources map[schema.GroupResource]bool, validators map[schema.GroupVersionKind]ObjectValidator) (admission.ValidationInterface, error) { + return &validateCustomResource{ + Handler: admission.NewHandler(admission.Create, admission.Update), + resources: resources, + validators: validators, + }, nil +} + +var _ admission.ValidationInterface = &validateCustomResource{} + +// Validate is an admission function that validates the custom resources this plugin is configured for. uncastAttributes are attributes +// whose objects may be of type unstructured. +func (a *validateCustomResource) Validate(ctx context.Context, uncastAttributes admission.Attributes, _ admission.ObjectInterfaces) error { + attributes := &unstructuredUnpackingAttributes{Attributes: uncastAttributes} + if a.shouldIgnore(attributes) { + return nil + } + validator, ok := a.validators[attributes.GetKind()] + if !ok { + return admission.NewForbidden(attributes, fmt.Errorf("unhandled kind: %v", attributes.GetKind())) + } + + switch attributes.GetOperation() { + case admission.Create: + // creating subresources isn't something we understand, but we can be pretty sure we don't need to validate it + if len(attributes.GetSubresource()) > 0 { + return nil + } + errors := validator.ValidateCreate(ctx, attributes.GetObject()) + if len(errors) == 0 { + return nil + } + return apierrors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errors) + + case admission.Update: + switch attributes.GetSubresource() { + case "": + errors := validator.ValidateUpdate(ctx, attributes.GetObject(), attributes.GetOldObject()) + if len(errors) == 0 { + return nil + } + return apierrors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errors) + + case "status": + errors := validator.ValidateStatusUpdate(ctx, attributes.GetObject(), attributes.GetOldObject()) + if len(errors) == 0 { + return nil + } + return apierrors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errors) + + default: + return admission.NewForbidden(attributes, fmt.Errorf("unhandled subresource: %v", attributes.GetSubresource())) + } + + default: + return admission.NewForbidden(attributes, fmt.Errorf("unhandled operation: %v", attributes.GetOperation())) + } +} + +func (a *validateCustomResource) shouldIgnore(attributes admission.Attributes) bool { + if !a.resources[attributes.GetResource().GroupResource()] { + return true + } + // if a subresource is specified and it isn't status, skip it + if len(attributes.GetSubresource()) > 0 && attributes.GetSubresource() != "status" { + return true + } + + return false +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator_test.go new file mode 100644 index 0000000000000..6fa92c79e8604 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator_test.go @@ -0,0 +1,278 @@ +package customresourcevalidation + +import ( + "context" + "errors" + "fmt" + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + + configv1 
"github.com/openshift/api/config/v1" +) + +func TestCustomResourceValidator(t *testing.T) { + + const ( + testGroup = "config.openshift.io" + testVersion = "v1" + testResource = "images" + testKind = "Image" + ) + + var testObjectType *configv1.Image + + testCases := []struct { + description string + object runtime.Object + objectBytes []byte + oldObject runtime.Object + oldObjectBytes []byte + kind schema.GroupVersionKind + namespace string + name string + resource schema.GroupVersionResource + subresource string + operation admission.Operation + userInfo user.Info + expectError bool + expectCreateFuncCalled bool + expectUpdateFuncCalled bool + expectStatusUpdateFuncCalled bool + validateFuncErr bool + expectedObjectType interface{} + }{ + { + description: "ShouldIgnoreUnknownResource", + resource: schema.GroupVersionResource{ + Group: "other_group", + Version: "other_version", + Resource: "other_resource", + }, + }, + { + description: "ShouldIgnoreUnknownSubresource", + subresource: "not_status", + }, + { + description: "UnhandledOperationConnect", + operation: admission.Connect, + expectError: true, + }, + { + description: "UnhandledOperationDelete", + operation: admission.Delete, + expectError: true, + }, + { + description: "UnhandledKind", + operation: admission.Create, + kind: schema.GroupVersionKind{ + Group: "other_group", + Version: "other_version", + Kind: "other_resource", + }, + expectError: true, + }, + { + description: "Create", + operation: admission.Create, + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + expectCreateFuncCalled: true, + expectedObjectType: testObjectType, + }, + { + description: "CreateSubresourceNope", + operation: admission.Create, + subresource: "status", + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + }, + { + description: "CreateError", + operation: admission.Create, + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + validateFuncErr: true, + expectCreateFuncCalled: true, + expectError: true, + }, + { + description: "Update", + operation: admission.Update, + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + oldObjectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + expectUpdateFuncCalled: true, + expectedObjectType: testObjectType, + }, + { + description: "UpdateError", + operation: admission.Update, + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + oldObjectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + validateFuncErr: true, + expectError: true, + }, + { + description: "UpdateStatus", + operation: admission.Update, + subresource: "status", + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + oldObjectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + expectStatusUpdateFuncCalled: true, + expectedObjectType: testObjectType, + }, + { + description: "UpdateStatusError", + operation: admission.Update, + subresource: "status", + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + oldObjectBytes: 
[]byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + expectStatusUpdateFuncCalled: true, + validateFuncErr: true, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + + var createFuncCalled bool + var updateFuncCalled bool + var updateStatusFuncCalled bool + var funcArgObject runtime.Object + var funcArgOldObject runtime.Object + + handler, err := NewValidator( + map[schema.GroupResource]bool{ + {Group: testGroup, Resource: testResource}: true, + }, + map[schema.GroupVersionKind]ObjectValidator{ + {Group: testGroup, Version: testVersion, Kind: testKind}: testValidator{ + validateCreate: func(_ context.Context, obj runtime.Object) field.ErrorList { + createFuncCalled = true + if tc.validateFuncErr { + return field.ErrorList{field.InternalError(field.NewPath("test"), errors.New("TEST Error"))} + } + funcArgObject = obj + return nil + }, + validateUpdate: func(_ context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + if tc.validateFuncErr { + return field.ErrorList{field.InternalError(field.NewPath("test"), errors.New("TEST Error"))} + } + updateFuncCalled = true + funcArgObject = obj + funcArgOldObject = oldObj + return nil + }, + validateStatusUpdate: func(_ context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + updateStatusFuncCalled = true + if tc.validateFuncErr { + return field.ErrorList{field.InternalError(field.NewPath("test"), errors.New("TEST Error"))} + } + funcArgObject = obj + funcArgOldObject = oldObj + return nil + }, + }, + }, + ) + if err != nil { + t.Fatal(err) + } + validator := handler.(admission.ValidationInterface) + + if len(tc.objectBytes) > 0 { + object, kind, err := unstructured.UnstructuredJSONScheme.Decode(tc.objectBytes, nil, nil) + if err != nil { + t.Fatal(err) + } + tc.object = object.(runtime.Object) + tc.kind = *kind + } + + if len(tc.oldObjectBytes) > 0 { + object, kind, err := unstructured.UnstructuredJSONScheme.Decode(tc.oldObjectBytes, nil, nil) + if err != nil { + t.Fatal(err) + } + tc.oldObject = object.(runtime.Object) + tc.kind = *kind + } + + if tc.resource == (schema.GroupVersionResource{}) { + tc.resource = schema.GroupVersionResource{ + Group: testGroup, + Version: testVersion, + Resource: testResource, + } + } + + attributes := admission.NewAttributesRecord( + tc.object, + tc.oldObject, + tc.kind, + tc.namespace, + tc.name, + tc.resource, + tc.subresource, + tc.operation, + nil, + false, + tc.userInfo, + ) + + err = validator.Validate(context.TODO(), attributes, nil) + switch { + case tc.expectError && err == nil: + t.Error("Error expected") + case !tc.expectError && err != nil: + t.Errorf("Unexpected error: %v", err) + } + if tc.expectCreateFuncCalled != createFuncCalled { + t.Errorf("ValidateObjCreateFunc called: expected: %v, actual: %v", tc.expectCreateFuncCalled, createFuncCalled) + } + if tc.expectUpdateFuncCalled != updateFuncCalled { + t.Errorf("ValidateObjUpdateFunc called: expected: %v, actual: %v", tc.expectUpdateFuncCalled, updateFuncCalled) + } + if tc.expectStatusUpdateFuncCalled != updateStatusFuncCalled { + t.Errorf("ValidateStatusUpdateFunc called: expected: %v, actual: %v", tc.expectStatusUpdateFuncCalled, updateStatusFuncCalled) + } + if reflect.TypeOf(tc.expectedObjectType) != reflect.TypeOf(funcArgObject) { + t.Errorf("Expected %T, actual %T", tc.expectedObjectType, funcArgObject) + } + if (tc.oldObject != nil) && (reflect.TypeOf(tc.expectedObjectType) != 
reflect.TypeOf(funcArgOldObject)) { + t.Errorf("Expected %T, actual %T", tc.expectedObjectType, funcArgOldObject) + } + }) + } + +} + +type testValidator struct { + validateCreate func(_ context.Context, uncastObj runtime.Object) field.ErrorList + validateUpdate func(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList + validateStatusUpdate func(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList +} + +func (v testValidator) ValidateCreate(ctx context.Context, uncastObj runtime.Object) field.ErrorList { + return v.validateCreate(ctx, uncastObj) +} + +func (v testValidator) ValidateUpdate(ctx context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + return v.validateUpdate(ctx, uncastObj, uncastOldObj) + +} + +func (v testValidator) ValidateStatusUpdate(ctx context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + return v.validateStatusUpdate(ctx, uncastObj, uncastOldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns.go b/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns.go new file mode 100644 index 0000000000000..0ae18e8f7e684 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns.go @@ -0,0 +1,242 @@ +package dns + +import ( + "context" + "fmt" + "io" + "reflect" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/validation" + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + api "k8s.io/kubernetes/pkg/apis/core" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" + + operatorv1 "github.com/openshift/api/operator/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "operator.openshift.io/ValidateDNS" + +// Register registers the DNS validation plugin. +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return crvalidation.NewValidator( + map[schema.GroupResource]bool{ + operatorv1.GroupVersion.WithResource("dnses").GroupResource(): true, + }, + map[schema.GroupVersionKind]crvalidation.ObjectValidator{ + operatorv1.GroupVersion.WithKind("DNS"): dnsV1{}, + }) + }) +} + +// toDNSV1 converts a runtime object to a versioned DNS. +func toDNSV1(uncastObj runtime.Object) (*operatorv1.DNS, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + obj, ok := uncastObj.(*operatorv1.DNS) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"DNS"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"operator.openshift.io/v1"}), + } + } + + return obj, nil +} + +// dnsV1 is runtime object that is validated as a versioned DNS. +type dnsV1 struct{} + +// ValidateCreate validates a DNS that is being created. +func (dnsV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toDNSV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) 
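	// A spec the create path below rejects (a sketch): a TLS transport without a
	// serverName fails validateDNSTransportConfig with a Required error.
	//
	//	spec := operatorv1.DNSSpec{
	//		UpstreamResolvers: operatorv1.UpstreamResolvers{
	//			TransportConfig: operatorv1.DNSTransportConfig{Transport: operatorv1.TLSTransport},
	//		},
	//	}
	//	errs := validateDNSSpecCreate(spec)
	//	// one error at spec.upstreamResolvers.transportConfig.tls.serverName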
+ errs = append(errs, validateDNSSpecCreate(obj.Spec)...) + + return errs +} + +// ValidateUpdate validates a DNS that is being updated. +func (dnsV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toDNSV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toDNSV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateDNSSpecUpdate(obj.Spec, oldObj.Spec)...) + + return errs +} + +// ValidateStatusUpdate validates a DNS status that is being updated. +func (dnsV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toDNSV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toDNSV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} + +// validateDNSSpecCreate validates the spec of a DNS that is being created. +func validateDNSSpecCreate(spec operatorv1.DNSSpec) field.ErrorList { + var errs field.ErrorList + specField := field.NewPath("spec") + errs = append(errs, validateDNSNodePlacement(spec.NodePlacement, specField.Child("nodePlacement"))...) + errs = append(errs, validateUpstreamResolversCreate(spec.UpstreamResolvers, specField.Child("upstreamResolvers"))...) + errs = append(errs, validateServersCreate(spec.Servers, specField.Child("servers"))...) + return errs +} + +// validateDNSSpecUpdate validates the spec of a DNS that is being updated. +func validateDNSSpecUpdate(newspec, oldspec operatorv1.DNSSpec) field.ErrorList { + var errs field.ErrorList + specField := field.NewPath("spec") + errs = append(errs, validateDNSNodePlacement(newspec.NodePlacement, specField.Child("nodePlacement"))...) + errs = append(errs, validateUpstreamResolversUpdate(newspec.UpstreamResolvers, oldspec.UpstreamResolvers, specField.Child("upstreamResolvers"))...) + errs = append(errs, validateServersUpdate(newspec.Servers, oldspec.Servers, specField.Child("servers"))...) + return errs +} + +// validateDNSNodePlacement validates the spec.nodePlacement field of a DNS. +func validateDNSNodePlacement(nodePlacement operatorv1.DNSNodePlacement, fldPath *field.Path) field.ErrorList { + var errs field.ErrorList + if len(nodePlacement.NodeSelector) != 0 { + errs = append(errs, unversionedvalidation.ValidateLabels(nodePlacement.NodeSelector, fldPath.Child("nodeSelector"))...) + } + if len(nodePlacement.Tolerations) != 0 { + errs = append(errs, validateTolerations(nodePlacement.Tolerations, fldPath.Child("tolerations"))...) + } + return errs +} + +// validateTolerations validates a slice of corev1.Toleration. +func validateTolerations(versionedTolerations []corev1.Toleration, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + unversionedTolerations := make([]api.Toleration, len(versionedTolerations)) + for i := range versionedTolerations { + if err := k8s_api_v1.Convert_v1_Toleration_To_core_Toleration(&versionedTolerations[i], &unversionedTolerations[i], nil); err != nil { + allErrors = append(allErrors, field.Invalid(fldPath.Index(i), unversionedTolerations[i], err.Error())) + } + } + allErrors = append(allErrors, apivalidation.ValidateTolerations(unversionedTolerations, fldPath)...) 
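	// As in the quota validation, versioned corev1 tolerations are converted to the
	// internal type so the upstream validator can be reused. An input that passes
	// cleanly (a sketch):
	//
	//	tols := []corev1.Toleration{{
	//		Key:      "node-role.kubernetes.io/master",
	//		Operator: corev1.TolerationOpExists,
	//		Effect:   corev1.TaintEffectNoSchedule,
	//	}}
	//	errs := validateTolerations(tols, field.NewPath("spec", "nodePlacement", "tolerations"))
	//	// errs is empty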
+ return allErrors +} + +// validateUpstreamResolversCreate validates configuration of the Upstream objects when TLS is configured. +func validateUpstreamResolversCreate(upstreamResolvers operatorv1.UpstreamResolvers, fieldPath *field.Path) field.ErrorList { + var errs field.ErrorList + + errs = append(errs, validateDNSTransportConfig(upstreamResolvers.TransportConfig, fieldPath.Child("transportConfig"))...) + + if upstreamResolvers.TransportConfig.Transport == operatorv1.TLSTransport { + // Transport is TLS so we must check if there are mixed Upstream types. SystemResolveConf is not allowed with TLS. + for i, upstream := range upstreamResolvers.Upstreams { + if upstream.Type == operatorv1.SystemResolveConfType { + errMessage := "SystemResolvConf is not allowed when TLS is configured as the transport" + errs = append(errs, field.Invalid(fieldPath.Child("upstreams").Index(i).Child("type"), upstream.Type, errMessage)) + } + } + } + + return errs +} + +// validateUpstreamResolversUpdate validates configuration of the Upstream objects when TLS is configured. +func validateUpstreamResolversUpdate(newUpstreamResolvers operatorv1.UpstreamResolvers, oldUpstreamResolvers operatorv1.UpstreamResolvers, fieldPath *field.Path) field.ErrorList { + var errs field.ErrorList + newTransport := newUpstreamResolvers.TransportConfig.Transport + + if !reflect.DeepEqual(newUpstreamResolvers.TransportConfig, oldUpstreamResolvers.TransportConfig) || isKnownTransport(newTransport) { + errs = append(errs, validateUpstreamResolversCreate(newUpstreamResolvers, fieldPath)...) + } + + return errs +} + +func isKnownTransport(transport operatorv1.DNSTransport) bool { + switch transport { + case "", operatorv1.CleartextTransport, operatorv1.TLSTransport: + return true + default: + return false + } +} + +func validateServersCreate(servers []operatorv1.Server, fieldPath *field.Path) field.ErrorList { + var errs field.ErrorList + for i, server := range servers { + errs = append(errs, validateDNSTransportConfig(server.ForwardPlugin.TransportConfig, fieldPath.Index(i).Child("forwardPlugin").Child("transportConfig"))...) + } + return errs +} + +func validateServersUpdate(newServers []operatorv1.Server, oldServers []operatorv1.Server, fieldPath *field.Path) field.ErrorList { + var errs field.ErrorList + for i, newServer := range newServers { + for _, oldServer := range oldServers { + // Use server.Name as the pivot for comparison since a cluster admin could conceivably change the transport + // and/or upstreams, making those insufficient for comparison. + if newServer.Name == oldServer.Name { + // TransportConfig has changed + if !reflect.DeepEqual(newServer.ForwardPlugin.TransportConfig, oldServer.ForwardPlugin.TransportConfig) { + errs = append(errs, validateDNSTransportConfig(newServer.ForwardPlugin.TransportConfig, fieldPath.Index(i).Child("forwardPlugin").Child("transportConfig"))...) + } + } + } + } + return errs +} + +func validateDNSTransportConfig(transportConfig operatorv1.DNSTransportConfig, fieldPath *field.Path) field.ErrorList { + var errs field.ErrorList + var emptyTransportConfig operatorv1.DNSTransportConfig + tlsConfig := transportConfig.TLS + + // No validation is needed on an empty TransportConfig. 
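	// Outcome summary for the switch below (restating its cases):
	//
	//	{Transport: "" or Cleartext, TLS: nil}       -> no errors
	//	{Transport: "" or Cleartext, TLS: non-nil}   -> Invalid on ...tls
	//	{Transport: TLS, TLS: nil or no ServerName}  -> Required on ...tls.serverName
	//	{Transport: anything else}                   -> Invalid on ...transport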
+ if transportConfig == emptyTransportConfig { + return errs + } + + switch transportConfig.Transport { + case "", operatorv1.CleartextTransport: + // Don't allow TLS configuration when using empty or Cleartext + if tlsConfig != nil { + errs = append(errs, field.Invalid(fieldPath.Child("tls"), transportConfig.TLS, "TLS must not be configured when using an empty or cleartext transport")) + } + case operatorv1.TLSTransport: + // When Transport is TLS, there MUST be a ServerName configured. + if tlsConfig == nil || tlsConfig.ServerName == "" { + errs = append(errs, field.Required(fieldPath.Child("tls").Child("serverName"), "transportConfig requires a serverName when transport is TLS")) + } + default: + errs = append(errs, field.Invalid(fieldPath.Child("transport"), transportConfig.Transport, "unknown transport")) + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns_test.go new file mode 100644 index 0000000000000..7e557b004447f --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns_test.go @@ -0,0 +1,899 @@ +package dns + +import ( + "testing" + + operatorv1 "github.com/openshift/api/operator/v1" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// TestFailValidateDNSSpecCreate verifies that validateDNSSpecCreate rejects invalid specs. +func TestFailValidateDNSSpecCreate(t *testing.T) { + errorCases := map[string]struct { + spec operatorv1.DNSSpec + errorType field.ErrorType + errorField string + }{ + "invalid toleration": { + spec: operatorv1.DNSSpec{ + NodePlacement: operatorv1.DNSNodePlacement{ + Tolerations: []corev1.Toleration{{ + Key: "x", + Operator: corev1.TolerationOpExists, + Effect: "NoExcute", // deliberately misspelled NoExecute to trigger a NotSupported error + }}, + }, + }, + errorType: field.ErrorTypeNotSupported, + errorField: "spec.nodePlacement.tolerations[0].effect", + }, + "invalid node selector": { + spec: operatorv1.DNSSpec{ + NodePlacement: operatorv1.DNSNodePlacement{ + NodeSelector: map[string]string{ + "-": "foo", + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.nodePlacement.nodeSelector", + }, + "SystemResolveConfType Upstream with TLS configured": { + spec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.upstreams[0].type", + }, + "Mixed Upstream types with TLS configured": { + spec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.upstreams[0].type", + }, + "Unknown Transport configured": { + spec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "random", + }, + }, + },
+ errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.transportConfig.transport", + }, + "ForwardPlugin configured with TLS and without ServerName": { + spec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "", + }, + }, + }, + }, + }, + }, + errorType: field.ErrorTypeRequired, + errorField: "spec.servers[0].forwardPlugin.transportConfig.tls.serverName", + }, + "ForwardPlugin configured with Cleartext and TLS configuration": { + spec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + {Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }}, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.servers[0].forwardPlugin.transportConfig.tls", + }, + } + + for tcName, tc := range errorCases { + errs := validateDNSSpecCreate(tc.spec) + if len(errs) == 0 { + t.Errorf("%q: should have failed but did not", tcName) + } + + for _, e := range errs { + if e.Type != tc.errorType { + t.Errorf("%q: expected errors of type '%s', got %v", tcName, tc.errorType, e) + } + + if e.Field != tc.errorField { + t.Errorf("%q: expected errors in field '%s', got %v", tcName, tc.errorField, e) + } + } + } +} + +func TestFailValidateDNSSpecUpdate(t *testing.T) { + errorCases := map[string]struct { + oldSpec operatorv1.DNSSpec + newSpec operatorv1.DNSSpec + errorType field.ErrorType + errorField string + }{ + "UpstreamResolvers configured with unknown transport and updated to invalid cleartext config": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "oldtransport", + }, + Upstreams: []operatorv1.Upstream{ + { + Type: "foo", + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.upstreams[0].type", + }, + "SystemResolveConfType Upstream with TLS configured": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + Address: "2.2.2.2", + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.upstreams[0].type", + }, + "UpstreamResolvers configured with unknown transport and updated to
invalid TLS configuration": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "unknown", + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + }, + }, + }, + errorType: field.ErrorTypeRequired, + errorField: "spec.upstreamResolvers.transportConfig.tls.serverName", + }, + "ForwardPlugin configured with unknown transport and updated to invalid TLS configuration": { + oldSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "unknown", + }, + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + }, + }, + }, + }, + }, + errorType: field.ErrorTypeRequired, + errorField: "spec.servers[0].forwardPlugin.transportConfig.tls.serverName", + }, + "UpstreamResolvers TransportConfig has not changed but Upstreams has changed": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.NetworkResolverType, + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.upstreams[0].type", + }, + "Servers Transport changed from known (TLS) to unknown type": { + oldSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "unknown-transport-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "unknown-transport-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "unknown", + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.servers[0].forwardPlugin.transportConfig.transport", + }, + "UpstreamResolvers Transport changed from known (TLS) to unknown type": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: 
&operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "unknown", + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.transportConfig.transport", + }, + "Uniform Upstream types to mixed Upstream types with TLS configured": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.NetworkResolverType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.upstreams[0].type", + }, + "UpstreamResolvers TLS configured without ServerName": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "", + }, + }, + }, + }, + errorType: field.ErrorTypeRequired, + errorField: "spec.upstreamResolvers.transportConfig.tls.serverName", + }, + "ForwardPlugin configured with TLS and without ServerName": { + oldSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "has-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.tls-server.com", + }, + }, + }, + }, + { + Name: "no-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.tls-server.com", + }, + }, + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "has-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.tls-server.com", + }, + }, + }, + }, + { + Name: "no-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "", + }, + }, + }, + }, + }, + }, + errorType: field.ErrorTypeRequired, + errorField: "spec.servers[1].forwardPlugin.transportConfig.tls.serverName", + }, + "ForwardPlugin 
configured with Cleartext and TLS configuration": { + oldSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.servers[0].forwardPlugin.transportConfig.tls", + }, + } + + for tcName, tc := range errorCases { + errs := validateDNSSpecUpdate(tc.newSpec, tc.oldSpec) + if len(errs) == 0 { + t.Errorf("%q: should have failed but did not", tcName) + } + + for _, e := range errs { + if e.Type != tc.errorType { + t.Errorf("%q: expected errors of type '%s', got %v", tcName, tc.errorType, e) + } + + if e.Field != tc.errorField { + t.Errorf("%q: expected errors in field '%s', got %v", tcName, tc.errorField, e) + } + } + } +} + +// TestSucceedValidateDNSSpecCreate verifies that validateDNSSpecCreate accepts valid specs. +func TestSucceedValidateDNSSpecCreate(t *testing.T) { + successCases := map[string]operatorv1.DNSSpec{ + "empty": {}, + "toleration + node selector": { + NodePlacement: operatorv1.DNSNodePlacement{ + NodeSelector: map[string]string{ + "node-role.kubernetes.io/master": "", + }, + Tolerations: []corev1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoExecute, + }}, + }, + }, + "NetworkResolverType Upstream with TLS configured": { + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + "Mixed Upstream types without TLS configured": { + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + }, + }, + "Mixed Upstream types with Cleartext configured": { + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + }, + }, + "Mixed Upstream types with nil TransportConfig configured": { + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{}, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + }, + }, + "Mixed Upstream types with empty Transport configured": { + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "", + }, + Upstreams: []operatorv1.Upstream{ + { + Type:
operatorv1.SystemResolveConfType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + }, + }, + } + + for tcName, s := range successCases { + errs := validateDNSSpecCreate(s) + if len(errs) != 0 { + t.Errorf("%q: expected success, but failed: %v", tcName, errs.ToAggregate().Error()) + } + } +} + +func TestSucceedValidateDNSSpecUpdate(t *testing.T) { + testCases := []struct { + description string + new operatorv1.DNSSpec + old operatorv1.DNSSpec + }{ + { + description: "UpstreamResolvers TransportConfig has not changed but Upstreams have changed", + old: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + Address: "1.1.1.1", + }, + }, + }, + }, + new: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + }, + }, + }, + }, + }, + { + description: "UpstreamResolvers unknown old transport matches unknown new transport", + old: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "oldtransport", + }, + }, + }, + new: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "oldtransport", + }, + }, + }, + }, + { + description: "UpstreamResolvers unknown old transport matches unknown new transport with Upstream changes", + old: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "oldtransport", + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + }, + }, + }, + new: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "oldtransport", + }, + Upstreams: []operatorv1.Upstream{ + { + Type: "random", + }, + }, + }, + }, + }, + { + description: "UpstreamResolvers TransportConfig has changed", + old: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + }, + }, + new: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + }, + { + description: "UpstreamResolvers known transport to empty", + old: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + }, + }, + new: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{}, + }, + }, + }, + { + description: "Servers TransportConfig has not changed", + old: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + }, + }, + }, + }, + new: operatorv1.DNSSpec{ + 
Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + }, + }, + }, + }, + }, + { + description: "Compare configuration by server name", + old: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "cleartext-transport-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + }, + }, + { + Name: "unknown-transport-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "unknown", + }, + }, + }, + }, + }, + new: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "unknown-transport-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "unknown", + }, + }, + }, + { + Name: "cleartext-transport-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.2"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + }, + }, + }, + }, + }, + { + description: "Servers TransportConfig has changed", + old: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{}, + }, + }, + }, + }, + new: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + errs := validateDNSSpecUpdate(tc.new, tc.old) + if len(errs) != 0 { + t.Errorf("%q: expected success, but failed: %v", tc.description, errs.ToAggregate().Error()) + } + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features.go b/openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features.go new file mode 100644 index 0000000000000..fb3c07f3ff6e3 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features.go @@ -0,0 +1,93 @@ +package features + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateFeatureGate" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("featuregates"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("FeatureGate"): featureGateV1{}, + }) + }) +} + +func toFeatureGateV1(uncastObj runtime.Object) (*configv1.FeatureGate, 
field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.FeatureGate) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"FeatureGate"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type featureGateV1 struct { +} + +func (featureGateV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toFeatureGateV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + + return allErrs +} + +func (featureGateV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toFeatureGateV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toFeatureGateV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return allErrs +} + +func (featureGateV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toFeatureGateV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toFeatureGateV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/helpers.go b/openshift-kube-apiserver/admission/customresourcevalidation/helpers.go new file mode 100644 index 0000000000000..9248d469a7b95 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/helpers.go @@ -0,0 +1,40 @@ +package customresourcevalidation + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core/validation" + + configv1 "github.com/openshift/api/config/v1" +) + +func ValidateConfigMapReference(path *field.Path, configMap configv1.ConfigMapNameReference, required bool) field.ErrorList { + return validateConfigMapSecret(path.Child("name"), configMap.Name, required, validation.ValidateConfigMapName) +} + +func ValidateSecretReference(path *field.Path, secret configv1.SecretNameReference, required bool) field.ErrorList { + return validateConfigMapSecret(path.Child("name"), secret.Name, required, validation.ValidateSecretName) +} + +func validateConfigMapSecret(path *field.Path, name string, required bool, validator validation.ValidateNameFunc) field.ErrorList { + if len(name) == 0 { + if required { + return field.ErrorList{field.Required(path, "")} + } + return nil + } + + if valErrs := validator(name, false); len(valErrs) > 0 { + return field.ErrorList{field.Invalid(path, name, strings.Join(valErrs, ", "))} + } + return nil +} + +// RequireNameCluster is a name validation function that requires the name to be cluster. It's handy for config.openshift.io types. 
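+// For example, admission of a config.openshift.io/v1 singleton named "default" is +// rejected by this function, while the one named "cluster" passes.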
+func RequireNameCluster(name string, prefix bool) []string { + if name != "cluster" { + return []string{"must be cluster"} + } + return nil +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/image/validate_image.go b/openshift-kube-apiserver/admission/customresourcevalidation/image/validate_image.go new file mode 100644 index 0000000000000..aa1fb01573bd6 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/image/validate_image.go @@ -0,0 +1,95 @@ +package image + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateImage" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("images"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Image"): imageV1{}, + }) + }) +} + +func toImageV1(uncastObj runtime.Object) (*configv1.Image, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Image) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Image"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type imageV1 struct { +} + +func (imageV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toImageV1(uncastObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + + return errs +} + +func (imageV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toImageV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toImageV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} + +func (imageV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toImageV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toImageV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) 
+ + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager/validate_kubecontrollermanager.go b/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager/validate_kubecontrollermanager.go new file mode 100644 index 0000000000000..8b3f0fe8be9b9 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager/validate_kubecontrollermanager.go @@ -0,0 +1,114 @@ +package kubecontrollermanager + +import ( + "context" + "fmt" + "io" + + operatorv1 "github.com/openshift/api/operator/v1" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "operator.openshift.io/ValidateKubeControllerManager" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + operatorv1.Resource("kubecontrollermanagers"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + operatorv1.GroupVersion.WithKind("KubeControllerManager"): kubeControllerManagerV1{}, + }) + }) +} + +func toKubeControllerManager(uncastObj runtime.Object) (*operatorv1.KubeControllerManager, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*operatorv1.KubeControllerManager) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"KubeControllerManager"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"operator.openshift.io/v1"})) + } + + return obj, nil +} + +type kubeControllerManagerV1 struct { +} + +func validateKubeControllerManagerSpecCreate(spec operatorv1.KubeControllerManagerSpec) field.ErrorList { + allErrs := field.ErrorList{} + + // on create, we allow anything + return allErrs +} + +func validateKubeControllerManagerSpecUpdate(spec, oldSpec operatorv1.KubeControllerManagerSpec) field.ErrorList { + allErrs := field.ErrorList{} + + // on update, fail if we go from secure to insecure + if oldSpec.UseMoreSecureServiceCA && !spec.UseMoreSecureServiceCA { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.useMoreSecureServiceCA"), "once enabled, the more secure service-ca.crt cannot be disabled")) + } + + return allErrs +} + +func (kubeControllerManagerV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toKubeControllerManager(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateKubeControllerManagerSpecCreate(obj.Spec)...) 
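+ // Note: the one-way ratchet on spec.useMoreSecureServiceCA is enforced only on + // update (see validateKubeControllerManagerSpecUpdate); create accepts either value.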
+ + return allErrs +} + +func (kubeControllerManagerV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toKubeControllerManager(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toKubeControllerManager(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateKubeControllerManagerSpecUpdate(obj.Spec, oldObj.Spec)...) + + return allErrs +} + +func (kubeControllerManagerV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toKubeControllerManager(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toKubeControllerManager(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/network/validate_network_config.go b/openshift-kube-apiserver/admission/customresourcevalidation/network/validate_network_config.go new file mode 100644 index 0000000000000..2fbedb220b7a2 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/network/validate_network_config.go @@ -0,0 +1,128 @@ +package network + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateNetwork" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("networks"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Network"): networkV1{}, + }) + }) +} + +func toNetworkV1(uncastObj runtime.Object) (*configv1.Network, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Network) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Network"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type networkV1 struct { +} + +func validateNetworkServiceNodePortRangeUpdate(obj, oldObj *configv1.Network) *field.Error { + var err error + defaultRange := kubeoptions.DefaultServiceNodePortRange + oldRange := &defaultRange + newRange := &defaultRange + + oldRangeStr := oldObj.Spec.ServiceNodePortRange + if oldRangeStr != "" { + if oldRange, err = utilnet.ParsePortRange(oldRangeStr); err != nil { + return field.Invalid(field.NewPath("spec", 
"serviceNodePortRange"), + oldRangeStr, + fmt.Sprintf("failed to parse the old port range: %v", err)) + } + } + newRangeStr := obj.Spec.ServiceNodePortRange + if newRangeStr != "" { + if newRange, err = utilnet.ParsePortRange(newRangeStr); err != nil { + return field.Invalid(field.NewPath("spec", "serviceNodePortRange"), + newRangeStr, + fmt.Sprintf("failed to parse the new port range: %v", err)) + } + } + if !newRange.Contains(oldRange.Base) || !newRange.Contains(oldRange.Base+oldRange.Size-1) { + return field.Invalid(field.NewPath("spec", "serviceNodePortRange"), + newRangeStr, + fmt.Sprintf("new service node port range %s does not completely cover the previous range %s", newRange, oldRange)) + } + return nil +} + +func (networkV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toNetworkV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + + return allErrs +} + +func (networkV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toNetworkV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toNetworkV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + if err := validateNetworkServiceNodePortRangeUpdate(obj, oldObj); err != nil { + allErrs = append(allErrs, err) + } + + return allErrs +} + +func (networkV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toNetworkV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toNetworkV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) 
+ + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile.go b/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile.go new file mode 100644 index 0000000000000..b4b63914f8d71 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile.go @@ -0,0 +1,124 @@ +package node + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +var rejectionScenarios = []struct { + fromProfile configv1.WorkerLatencyProfileType + toProfile configv1.WorkerLatencyProfileType +}{ + {fromProfile: "", toProfile: configv1.LowUpdateSlowReaction}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: ""}, + {fromProfile: configv1.DefaultUpdateDefaultReaction, toProfile: configv1.LowUpdateSlowReaction}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: configv1.DefaultUpdateDefaultReaction}, +} + +const PluginName = "config.openshift.io/RestrictExtremeWorkerLatencyProfile" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("nodes"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Node"): configNodeV1{}, + }) + }) +} + +func toConfigNodeV1(uncastObj runtime.Object) (*configv1.Node, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Node) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Node"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type configNodeV1 struct{} + +func validateConfigNodeForExtremeLatencyProfile(obj, oldObj *configv1.Node) *field.Error { + fromProfile := oldObj.Spec.WorkerLatencyProfile + toProfile := obj.Spec.WorkerLatencyProfile + + for _, rejectionScenario := range rejectionScenarios { + if fromProfile == rejectionScenario.fromProfile && toProfile == rejectionScenario.toProfile { + return field.Invalid(field.NewPath("spec", "workerLatencyProfile"), obj.Spec.WorkerLatencyProfile, + fmt.Sprintf( + "cannot update worker latency profile from %q to %q: this extreme transition is unsupported; select a profile with a supported transition, such as %q", + oldObj.Spec.WorkerLatencyProfile, + obj.Spec.WorkerLatencyProfile, + configv1.MediumUpdateAverageReaction, + ), + ) + } + } + return nil +} + +func (configNodeV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toConfigNodeV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...)
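+ // The extreme worker-latency-profile transition check applies only on update, + // where both the old and new profiles are known; create has nothing to compare.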
+ + return allErrs +} + +func (configNodeV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toConfigNodeV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toConfigNodeV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + if err := validateConfigNodeForExtremeLatencyProfile(obj, oldObj); err != nil { + allErrs = append(allErrs, err) + } + + return allErrs +} + +func (configNodeV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toConfigNodeV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toConfigNodeV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile_test.go new file mode 100644 index 0000000000000..b22c6a2da90a1 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile_test.go @@ -0,0 +1,68 @@ +package node + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + configv1 "github.com/openshift/api/config/v1" +) + +func TestValidateConfigNodeForExtremeLatencyProfile(t *testing.T) { + testCases := []struct { + fromProfile configv1.WorkerLatencyProfileType + toProfile configv1.WorkerLatencyProfileType + shouldReject bool + }{ + // no rejections + {fromProfile: "", toProfile: "", shouldReject: false}, + {fromProfile: "", toProfile: configv1.DefaultUpdateDefaultReaction, shouldReject: false}, + {fromProfile: "", toProfile: configv1.MediumUpdateAverageReaction, shouldReject: false}, + {fromProfile: configv1.DefaultUpdateDefaultReaction, toProfile: "", shouldReject: false}, + {fromProfile: configv1.DefaultUpdateDefaultReaction, toProfile: configv1.DefaultUpdateDefaultReaction, shouldReject: false}, + {fromProfile: configv1.DefaultUpdateDefaultReaction, toProfile: configv1.MediumUpdateAverageReaction, shouldReject: false}, + {fromProfile: configv1.MediumUpdateAverageReaction, toProfile: "", shouldReject: false}, + {fromProfile: configv1.MediumUpdateAverageReaction, toProfile: configv1.DefaultUpdateDefaultReaction, shouldReject: false}, + {fromProfile: configv1.MediumUpdateAverageReaction, toProfile: configv1.MediumUpdateAverageReaction, shouldReject: false}, + {fromProfile: configv1.MediumUpdateAverageReaction, toProfile: configv1.LowUpdateSlowReaction, shouldReject: false}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: configv1.MediumUpdateAverageReaction, shouldReject: false}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: configv1.LowUpdateSlowReaction, shouldReject: false}, + + // rejections + {fromProfile: "", toProfile: configv1.LowUpdateSlowReaction, shouldReject: true}, + {fromProfile: configv1.DefaultUpdateDefaultReaction, toProfile: configv1.LowUpdateSlowReaction, shouldReject: true}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: "", 
shouldReject: true}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: configv1.DefaultUpdateDefaultReaction, shouldReject: true}, + } + + for _, testCase := range testCases { + shouldStr := "should not be" + if testCase.shouldReject { + shouldStr = "should be" + } + testCaseName := fmt.Sprintf("update from profile %s to %s %s rejected", testCase.fromProfile, testCase.toProfile, shouldStr) + t.Run(testCaseName, func(t *testing.T) { + // config node objects + oldObject := configv1.Node{ + Spec: configv1.NodeSpec{ + WorkerLatencyProfile: testCase.fromProfile, + }, + } + newObject := configv1.Node{ + Spec: configv1.NodeSpec{ + WorkerLatencyProfile: testCase.toProfile, + }, + } + + // The validator takes (new, old), so the new object comes first. + fieldErr := validateConfigNodeForExtremeLatencyProfile(&newObject, &oldObject) + assert.Equal(t, testCase.shouldReject, fieldErr != nil, "latency profile from %q to %q %s rejected", testCase.fromProfile, testCase.toProfile, shouldStr) + + if testCase.shouldReject { + assert.Equal(t, "spec.workerLatencyProfile", fieldErr.Field, "field name for latency profile should be spec.workerLatencyProfile") + assert.Contains(t, fieldErr.Detail, testCase.fromProfile, "error message should contain %q", testCase.fromProfile) + assert.Contains(t, fieldErr.Detail, testCase.toProfile, "error message should contain %q", testCase.toProfile) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/helpers.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/helpers.go new file mode 100644 index 0000000000000..126a53bb9ac1c --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/helpers.go @@ -0,0 +1,32 @@ +package oauth + +import ( + kvalidation "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + netutils "k8s.io/utils/net" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/validation" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func isValidHostname(hostname string) bool { + return len(kvalidation.IsDNS1123Subdomain(hostname)) == 0 || netutils.ParseIPSloppy(hostname) != nil +} + +func ValidateRemoteConnectionInfo(remoteConnectionInfo configv1.OAuthRemoteConnectionInfo, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(remoteConnectionInfo.URL) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("url"), "")) + } else { + _, urlErrs := validation.ValidateSecureURL(remoteConnectionInfo.URL, fldPath.Child("url")) + allErrs = append(allErrs, urlErrs...) + } + + allErrs = append(allErrs, crvalidation.ValidateConfigMapReference(fldPath.Child("ca"), remoteConnectionInfo.CA, false)...) + allErrs = append(allErrs, crvalidation.ValidateSecretReference(fldPath.Child("tlsClientCert"), remoteConnectionInfo.TLSClientCert, false)...) + allErrs = append(allErrs, crvalidation.ValidateSecretReference(fldPath.Child("tlsClientKey"), remoteConnectionInfo.TLSClientKey, false)...)
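+ // The CA config map and TLS client cert/key secret references are optional here + // (required=false); only their names are syntax-checked when present.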
+ + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github.go new file mode 100644 index 0000000000000..2ae0b45254a14 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github.go @@ -0,0 +1,69 @@ +package oauth + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func ValidateGitHubIdentityProvider(provider *configv1.GitHubIdentityProvider, mappingMethod configv1.MappingMethodType, fieldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if provider == nil { + errs = append(errs, field.Required(fieldPath, "")) + return errs + } + + errs = append(errs, ValidateOAuthIdentityProvider(provider.ClientID, provider.ClientSecret, fieldPath.Child("provider"))...) + + if len(provider.Teams) > 0 && len(provider.Organizations) > 0 { + errs = append(errs, field.Invalid(fieldPath.Child("organizations"), provider.Organizations, "specify organizations or teams, not both")) + errs = append(errs, field.Invalid(fieldPath.Child("teams"), provider.Teams, "specify organizations or teams, not both")) + } + + // only check that there are some teams/orgs if not GitHub Enterprise Server + if len(provider.Hostname) == 0 && len(provider.Teams) == 0 && len(provider.Organizations) == 0 && mappingMethod != configv1.MappingMethodLookup { + errs = append(errs, field.Invalid(fieldPath, nil, "one of organizations or teams must be specified unless hostname is set or lookup is used")) + } + for i, organization := range provider.Organizations { + if strings.Contains(organization, "/") { + errs = append(errs, field.Invalid(fieldPath.Child("organizations").Index(i), organization, "cannot contain /")) + } + if len(organization) == 0 { + errs = append(errs, field.Required(fieldPath.Child("organizations").Index(i), "cannot be empty")) + } + } + for i, team := range provider.Teams { + if split := strings.Split(team, "/"); len(split) != 2 { + errs = append(errs, field.Invalid(fieldPath.Child("teams").Index(i), team, "must be in the format <org>/<team>")) + } else if org, t := split[0], split[1]; len(org) == 0 || len(t) == 0 { + errs = append(errs, field.Invalid(fieldPath.Child("teams").Index(i), team, "must be in the format <org>/<team>")) + } + } + + if hostname := provider.Hostname; len(hostname) != 0 { + hostnamePath := fieldPath.Child("hostname") + + if hostname == "github.com" || strings.HasSuffix(hostname, ".github.com") { + errs = append(errs, field.Invalid(hostnamePath, hostname, "cannot equal [*.]github.com")) + } + + if !isValidHostname(hostname) { + errs = append(errs, field.Invalid(hostnamePath, hostname, "must be a valid DNS subdomain or IP address")) + } + } + + if caFile := provider.CA; len(caFile.Name) != 0 { + caPath := fieldPath.Child("ca") + + errs = append(errs, crvalidation.ValidateConfigMapReference(caPath, caFile, true)...)
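+ // A custom CA bundle is only meaningful for GitHub Enterprise Server; when + // hostname is empty the provider is github.com and the CA reference is rejected below.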
+ + if len(provider.Hostname) == 0 { + errs = append(errs, field.Invalid(caPath, caFile, "cannot be specified when hostname is empty")) + } + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github_test.go new file mode 100644 index 0000000000000..10102f24e45e8 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github_test.go @@ -0,0 +1,249 @@ +package oauth + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" +) + +func TestValidateGitHubIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.GitHubIdentityProvider + mappingMethod configv1.MappingMethodType + fieldPath *field.Path + } + tests := []struct { + name string + args args + errors field.ErrorList + }{ + { + name: "cannot use GH as hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "github.com", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "hostname", BadValue: "github.com", Detail: "cannot equal [*.]github.com"}, + }, + }, + { + name: "cannot use GH subdomain as hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "foo.github.com", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "hostname", BadValue: "foo.github.com", Detail: "cannot equal [*.]github.com"}, + }, + }, + { + name: "valid domain hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "company.com", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + }, + { + name: "valid ip hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "192.168.8.1", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + }, + { + name: "invalid ip hostname with port", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "192.168.8.1:8080", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "hostname", BadValue: "192.168.8.1:8080", Detail: "must be a valid DNS subdomain or IP address"}, + }, + }, + { + name: "invalid domain hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "google-.com", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, 
+ }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "hostname", BadValue: "google-.com", Detail: "must be a valid DNS subdomain or IP address"}, + }, + }, + { + name: "invalid name in ca ref and no hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "", + CA: configv1.ConfigMapNameReference{Name: "ca&config-map"}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "ca.name", BadValue: "ca&config-map", Detail: "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')"}, + {Type: field.ErrorTypeInvalid, Field: "ca", BadValue: configv1.ConfigMapNameReference{Name: "ca&config-map"}, Detail: "cannot be specified when hostname is empty"}, + }, + }, + { + name: "valid ca and hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "mo.co", + CA: configv1.ConfigMapNameReference{Name: "ca-config-map"}, + }, + mappingMethod: "", + }, + }, + { + name: "GitHub requires client ID and secret", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "", + ClientSecret: configv1.SecretNameReference{}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeRequired, Field: "provider.clientID", BadValue: "", Detail: ""}, + {Type: field.ErrorTypeRequired, Field: "provider.clientSecret.name", BadValue: "", Detail: ""}, + }, + }, + { + name: "GitHub warns when not constrained to organizations or teams without lookup", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: nil, + Teams: nil, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "", BadValue: nil, Detail: "one of organizations or teams must be specified unless hostname is set or lookup is used"}, + }, + }, + { + name: "GitHub does not warn when not constrained to organizations or teams with lookup", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: nil, + Teams: nil, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "lookup", + }, + }, + { + name: "invalid: cannot specify both organizations and teams", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: []string{"org1/team1"}, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "organizations", BadValue: []string{"org1"}, Detail: "specify organizations or teams, not both"}, + {Type: field.ErrorTypeInvalid, Field: "teams", BadValue: []string{"org1/team1"},
Detail: "specify organizations or teams, not both"}, + }, + }, + { + name: "invalid team format", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: nil, + Teams: []string{"org1/team1", "org2/not/team2", "org3//team3", "", "org4/team4"}, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "teams[1]", BadValue: "org2/not/team2", Detail: "must be in the format /"}, + {Type: field.ErrorTypeInvalid, Field: "teams[2]", BadValue: "org3//team3", Detail: "must be in the format /"}, + {Type: field.ErrorTypeInvalid, Field: "teams[3]", BadValue: "", Detail: "must be in the format /"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ValidateGitHubIdentityProvider(tt.args.provider, tt.args.mappingMethod, tt.args.fieldPath) + if tt.errors == nil && len(got) == 0 { + return + } + if !reflect.DeepEqual(got, tt.errors) { + t.Errorf("ValidateGitHubIdentityProvider() = %v, want %v", got, tt.errors) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab.go new file mode 100644 index 0000000000000..ea9fda2ab4d8c --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab.go @@ -0,0 +1,26 @@ +package oauth + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/validation" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func ValidateGitLabIdentityProvider(provider *configv1.GitLabIdentityProvider, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if provider == nil { + allErrs = append(allErrs, field.Required(fieldPath, "")) + return allErrs + } + + allErrs = append(allErrs, ValidateOAuthIdentityProvider(provider.ClientID, provider.ClientSecret, fieldPath)...) + + _, urlErrs := validation.ValidateSecureURL(provider.URL, fieldPath.Child("url")) + allErrs = append(allErrs, urlErrs...) + + allErrs = append(allErrs, crvalidation.ValidateConfigMapReference(fieldPath.Child("ca"), provider.CA, false)...) 
+ + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab_test.go new file mode 100644 index 0000000000000..9ce73cdc731ee --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab_test.go @@ -0,0 +1,104 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func gitlabIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeGitLab, + GitLab: &configv1.GitLabIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-gitlab-secret"}, + URL: "https://thisgitlabinstancerighthere.com", + CA: configv1.ConfigMapNameReference{Name: "letsencrypt-for-gitlab.instance"}, + }, + } +} + +func TestValidateGitLabIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.GitLabIdentityProvider + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "insecure URL", + args: args{ + provider: &configv1.GitLabIdentityProvider{ + ClientID: "hereBeMyId", + ClientSecret: configv1.SecretNameReference{Name: "gitlab-client-sec"}, + URL: "http://anyonecanseemenow.com", + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("url"), "http://anyonecanseemenow.com", "must use https scheme"), + }, + }, + { + name: "missing client ID and secret", + args: args{ + provider: &configv1.GitLabIdentityProvider{ + URL: "https://privategitlab.com", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("clientID"), ""), + field.Required(field.NewPath("clientSecret", "name"), ""), + }, + }, + { + name: "invalid CA ref name", + args: args{ + provider: &configv1.GitLabIdentityProvider{ + ClientID: "hereBeMyId", + ClientSecret: configv1.SecretNameReference{Name: "gitlab-client-sec"}, + URL: "https://anyonecanseemenow.com", + CA: configv1.ConfigMapNameReference{Name: "veryBadRefName?:("}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("ca", "name"), "veryBadRefName?:(", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "minimal passing case", + args: args{ + provider: &configv1.GitLabIdentityProvider{ + ClientID: "hereBeMyId", + ClientSecret: configv1.SecretNameReference{Name: "gitlab-client-sec"}, + URL: "https://anyonecanseemenow.com", + }, + }, + want: field.ErrorList{}, + }, + { + name: "more complicated case", + args: args{ + provider: gitlabIDP().GitLab, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateGitLabIdentityProvider(tt.args.provider, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateGitLabIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google.go new file mode 100644 index 0000000000000..481b162cf756b --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google.go @@ -0,0 +1,23 @@ +package oauth + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 
"github.com/openshift/api/config/v1" +) + +func ValidateGoogleIdentityProvider(provider *configv1.GoogleIdentityProvider, mappingMethod configv1.MappingMethodType, fieldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if provider == nil { + errs = append(errs, field.Required(fieldPath, "")) + return errs + } + + errs = append(errs, ValidateOAuthIdentityProvider(provider.ClientID, provider.ClientSecret, fieldPath)...) + + if len(provider.HostedDomain) == 0 && mappingMethod != configv1.MappingMethodLookup { + errs = append(errs, field.Invalid(fieldPath.Child("hostedDomain"), nil, "hostedDomain must be specified unless lookup is used")) + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google_test.go new file mode 100644 index 0000000000000..88306d0f1919f --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google_test.go @@ -0,0 +1,90 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func googleIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeGoogle, + Google: &configv1.GoogleIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-google-secret"}, + HostedDomain: "myprivategoogledomain.com", + }, + } +} + +func TestValidateGoogleIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.GoogleIdentityProvider + mappingMethod configv1.MappingMethodType + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "missing client ID and secret", + args: args{ + provider: &configv1.GoogleIdentityProvider{ + HostedDomain: "myprivategoogledomain.com", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("clientID"), ""), + field.Required(field.NewPath("clientSecret", "name"), ""), + }, + }, + { + name: "no hosted domain with mapping method != 'lookup'", + args: args{ + provider: &configv1.GoogleIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-google-secret"}, + }, + mappingMethod: configv1.MappingMethodClaim, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("hostedDomain"), nil, "hostedDomain must be specified unless lookup is used"), + }, + }, + { + name: "no hosted domain with mapping method == 'lookup'", + args: args{ + provider: &configv1.GoogleIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-google-secret"}, + }, + mappingMethod: configv1.MappingMethodLookup, + }, + want: field.ErrorList{}, + }, + { + name: "working example", + args: args{ + provider: googleIDP().Google, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateGoogleIdentityProvider(tt.args.provider, tt.args.mappingMethod, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateGoogleIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp.go 
b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp.go new file mode 100644 index 0000000000000..86e8158c95799 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp.go @@ -0,0 +1,215 @@ +package oauth + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/validation/path" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + pointerutil "k8s.io/utils/pointer" + + configv1 "github.com/openshift/api/config/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const ( + // MinimumInactivityTimeoutSeconds defines the smallest value allowed + // for AccessTokenInactivityTimeoutSeconds. + // It also defines the ticker interval for the token update routine, as + // MinimumInactivityTimeoutSeconds / 3 is used there. + MinimumInactivityTimeoutSeconds = 5 * 60 +) + +var validMappingMethods = sets.NewString( + string(configv1.MappingMethodLookup), + string(configv1.MappingMethodClaim), + string(configv1.MappingMethodAdd), +) + +func validateOAuthSpec(spec configv1.OAuthSpec) field.ErrorList { + errs := field.ErrorList{} + specPath := field.NewPath("spec") + + providerNames := sets.NewString() + + challengeIssuingIdentityProviders := []string{} + challengeRedirectingIdentityProviders := []string{} + + // TODO move to ValidateIdentityProviders (plural) + for i, identityProvider := range spec.IdentityProviders { + if isUsedAsChallenger(identityProvider.IdentityProviderConfig) { + // TODO fix CAO to properly let you use request header and other challengers by disabling the other ones on CLI + // RequestHeaderIdentityProvider is special, it can only react to challenge clients by redirecting them + // Make sure we don't have more than a single redirector, and don't have a mix of challenge issuers and redirectors + if identityProvider.Type == configv1.IdentityProviderTypeRequestHeader { + challengeRedirectingIdentityProviders = append(challengeRedirectingIdentityProviders, identityProvider.Name) + } else { + challengeIssuingIdentityProviders = append(challengeIssuingIdentityProviders, identityProvider.Name) + } + } + + identityProviderPath := specPath.Child("identityProviders").Index(i) + errs = append(errs, ValidateIdentityProvider(identityProvider, identityProviderPath)...)
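To make the challenger bookkeeping above concrete, a hedged sketch (package oauth assumed; isUsedAsChallenger is defined later in this file):

func ExampleChallengerClassification() {
	// A request-header IdP with a challengeURL can only redirect challenge
	// clients, so validateOAuthSpec counts it as a redirector...
	sso := configv1.IdentityProviderConfig{
		Type: configv1.IdentityProviderTypeRequestHeader,
		RequestHeader: &configv1.RequestHeaderIdentityProvider{
			ChallengeURL: "https://sso.example.com/challenge?${query}",
		},
	}
	// ...while an LDAP IdP issues challenges itself.
	ldap := configv1.IdentityProviderConfig{Type: configv1.IdentityProviderTypeLDAP}

	fmt.Println(isUsedAsChallenger(sso), isUsedAsChallenger(ldap))
	// Output: true true
}

Specs that mix the two kinds, or that contain more than one redirector, are rejected just below.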
+ + if len(identityProvider.Name) > 0 { + if providerNames.Has(identityProvider.Name) { + errs = append(errs, field.Invalid(identityProviderPath.Child("name"), identityProvider.Name, "must have a unique name")) + } + providerNames.Insert(identityProvider.Name) + } + } + + if len(challengeRedirectingIdentityProviders) > 1 { + errs = append(errs, field.Invalid(specPath.Child("identityProviders"), "", fmt.Sprintf("only one identity provider can redirect clients requesting an authentication challenge, found: %v", strings.Join(challengeRedirectingIdentityProviders, ", ")))) + } + if len(challengeRedirectingIdentityProviders) > 0 && len(challengeIssuingIdentityProviders) > 0 { + errs = append(errs, field.Invalid(specPath.Child("identityProviders"), "", fmt.Sprintf( + "cannot mix providers that redirect clients requesting auth challenges (%s) with providers issuing challenges to those clients (%s)", + strings.Join(challengeRedirectingIdentityProviders, ", "), + strings.Join(challengeIssuingIdentityProviders, ", "), + ))) + } + + // TODO move to ValidateTokenConfig + timeout := spec.TokenConfig.AccessTokenInactivityTimeout + if timeout != nil && timeout.Seconds() < MinimumInactivityTimeoutSeconds { + errs = append(errs, field.Invalid( + specPath.Child("tokenConfig", "accessTokenInactivityTimeout"), timeout, + fmt.Sprintf("the minimum acceptable token timeout value is %d seconds", + MinimumInactivityTimeoutSeconds))) + } + + if tokenMaxAge := spec.TokenConfig.AccessTokenMaxAgeSeconds; tokenMaxAge < 0 { + errs = append(errs, field.Invalid(specPath.Child("tokenConfig", "accessTokenMaxAgeSeconds"), tokenMaxAge, "must be a positive integer or 0")) + } + + // TODO move to ValidateTemplates + errs = append(errs, crvalidation.ValidateSecretReference(specPath.Child("templates", "login"), spec.Templates.Login, false)...) + errs = append(errs, crvalidation.ValidateSecretReference(specPath.Child("templates", "providerSelection"), spec.Templates.ProviderSelection, false)...) + errs = append(errs, crvalidation.ValidateSecretReference(specPath.Child("templates", "error"), spec.Templates.Error, false)...) + + return errs +} + +// if you change this, update the peer in user validation. also, don't change this. 
+func validateIdentityProviderName(name string) []string { + if reasons := path.ValidatePathSegmentName(name, false); len(reasons) != 0 { + return reasons + } + + if strings.Contains(name, ":") { + return []string{`may not contain ":"`} + } + return nil +} + +func ValidateIdentityProvider(identityProvider configv1.IdentityProvider, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + + if len(identityProvider.Name) == 0 { + errs = append(errs, field.Required(fldPath.Child("name"), "")) + } else if reasons := validateIdentityProviderName(identityProvider.Name); len(reasons) != 0 { + errs = append(errs, field.Invalid(fldPath.Child("name"), identityProvider.Name, strings.Join(reasons, ", "))) + } + + if len(identityProvider.MappingMethod) > 0 && !validMappingMethods.Has(string(identityProvider.MappingMethod)) { + errs = append(errs, field.NotSupported(fldPath.Child("mappingMethod"), identityProvider.MappingMethod, validMappingMethods.List())) + } + + provider := identityProvider.IdentityProviderConfig + // create a copy of the provider to simplify checking that only one IdP is set + providerCopy := provider.DeepCopy() + switch provider.Type { + case "": + errs = append(errs, field.Required(fldPath.Child("type"), "")) + + case configv1.IdentityProviderTypeRequestHeader: + errs = append(errs, ValidateRequestHeaderIdentityProvider(provider.RequestHeader, fldPath)...) + providerCopy.RequestHeader = nil + + case configv1.IdentityProviderTypeBasicAuth: + // TODO move to ValidateBasicAuthIdentityProvider for consistency + if provider.BasicAuth == nil { + errs = append(errs, field.Required(fldPath.Child("basicAuth"), "")) + } else { + errs = append(errs, ValidateRemoteConnectionInfo(provider.BasicAuth.OAuthRemoteConnectionInfo, fldPath.Child("basicAuth"))...) + } + providerCopy.BasicAuth = nil + + case configv1.IdentityProviderTypeHTPasswd: + // TODO move to ValidateHTPasswdIdentityProvider for consistency + if provider.HTPasswd == nil { + errs = append(errs, field.Required(fldPath.Child("htpasswd"), "")) + } else { + errs = append(errs, crvalidation.ValidateSecretReference(fldPath.Child("htpasswd", "fileData"), provider.HTPasswd.FileData, true)...) + } + providerCopy.HTPasswd = nil + + case configv1.IdentityProviderTypeLDAP: + errs = append(errs, ValidateLDAPIdentityProvider(provider.LDAP, fldPath.Child("ldap"))...) + providerCopy.LDAP = nil + + case configv1.IdentityProviderTypeKeystone: + errs = append(errs, ValidateKeystoneIdentityProvider(provider.Keystone, fldPath.Child("keystone"))...) + providerCopy.Keystone = nil + + case configv1.IdentityProviderTypeGitHub: + errs = append(errs, ValidateGitHubIdentityProvider(provider.GitHub, identityProvider.MappingMethod, fldPath.Child("github"))...) + providerCopy.GitHub = nil + + case configv1.IdentityProviderTypeGitLab: + errs = append(errs, ValidateGitLabIdentityProvider(provider.GitLab, fldPath.Child("gitlab"))...) + providerCopy.GitLab = nil + + case configv1.IdentityProviderTypeGoogle: + errs = append(errs, ValidateGoogleIdentityProvider(provider.Google, identityProvider.MappingMethod, fldPath.Child("google"))...) + providerCopy.Google = nil + + case configv1.IdentityProviderTypeOpenID: + errs = append(errs, ValidateOpenIDIdentityProvider(provider.OpenID, fldPath.Child("openID"))...)
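A quick sketch of what the identity provider name rules above accept and reject (hypothetical names; package oauth assumed, plus "fmt"):

func ExampleValidateIdentityProviderName() {
	fmt.Println(validateIdentityProviderName("my-idp"))   // [] - valid
	fmt.Println(validateIdentityProviderName("bad:name")) // [may not contain ":"]
	fmt.Println(validateIdentityProviderName(".."))       // rejected by path.ValidatePathSegmentName
}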
+ providerCopy.OpenID = nil + + default: + errs = append(errs, field.Invalid(fldPath.Child("type"), identityProvider.Type, "not a valid provider type")) + } + + if !pointerutil.AllPtrFieldsNil(providerCopy) { + errs = append(errs, field.Invalid(fldPath, identityProvider.IdentityProviderConfig, "only one identity provider can be configured in single object")) + } + + return errs +} + +func ValidateOAuthIdentityProvider(clientID string, clientSecretRef configv1.SecretNameReference, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(clientID) == 0 { + allErrs = append(allErrs, field.Required(fieldPath.Child("clientID"), "")) + } + + allErrs = append(allErrs, crvalidation.ValidateSecretReference(fieldPath.Child("clientSecret"), clientSecretRef, true)...) + + return allErrs +} + +func isUsedAsChallenger(idp configv1.IdentityProviderConfig) bool { + // TODO this is wrong and needs to be more dynamic... + switch idp.Type { + // whitelist all the IdPs that we set `UseAsChallenger: true` in cluster-authentication-operator + case configv1.IdentityProviderTypeBasicAuth, configv1.IdentityProviderTypeGitLab, + configv1.IdentityProviderTypeHTPasswd, configv1.IdentityProviderTypeKeystone, + configv1.IdentityProviderTypeLDAP, + // guard open ID for now because it *could* have challenge in the future + configv1.IdentityProviderTypeOpenID: + return true + case configv1.IdentityProviderTypeRequestHeader: + if idp.RequestHeader == nil { + // this is an error reported elsewhere + return false + } + return len(idp.RequestHeader.ChallengeURL) > 0 + default: + return false + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp_test.go new file mode 100644 index 0000000000000..af0aa6cfa4d82 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp_test.go @@ -0,0 +1,429 @@ +package oauth + +import ( + "fmt" + "reflect" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" +) + +const wrongConfigMapSecretErrMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 
'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')" + +func htpasswdIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeHTPasswd, + HTPasswd: &configv1.HTPasswdIdentityProvider{ + FileData: configv1.SecretNameReference{ + Name: "innocent.llama", + }, + }, + } +} + +func TestValidateOAuthSpec(t *testing.T) { + doubledIdPs := configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeHTPasswd, + HTPasswd: &configv1.HTPasswdIdentityProvider{ + FileData: configv1.SecretNameReference{ + Name: "innocent.llama", + }, + }, + GitLab: &configv1.GitLabIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-gitlab-secret"}, + URL: "https://thisgitlabinstancerighthere.com", + CA: configv1.ConfigMapNameReference{Name: "letsencrypt-for-gitlab.instance"}, + }, + } + + type args struct { + spec configv1.OAuthSpec + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "empty object", + args: args{ + spec: configv1.OAuthSpec{}, + }, + }, + { + name: "more than one challenge issuing IdPs", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "htpasswd", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "ldap", + IdentityProviderConfig: ldapIDP(), + }, + }, + }, + }, + }, + { + name: "more than one challenge redirecting IdPs", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "sso1", + IdentityProviderConfig: requestHeaderIDP(true, true), + }, + { + Name: "sso2", + IdentityProviderConfig: requestHeaderIDP(true, false), + }, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "identityProviders"), "", "only one identity provider can redirect clients requesting an authentication challenge, found: sso1, sso2"), + }, + }, + { + name: "mixing challenge issuing and redirecting IdPs", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "sso", + IdentityProviderConfig: requestHeaderIDP(true, false), + }, + { + Name: "ldap", + IdentityProviderConfig: ldapIDP(), + }, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "identityProviders"), "", "cannot mix providers that redirect clients requesting auth challenges (sso) with providers issuing challenges to those clients (ldap)"), + }, + }, + { + name: "two IdPs with the same name", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "aname", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "bname", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "aname", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "cname", + IdentityProviderConfig: htpasswdIDP(), + }, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "identityProviders").Index(2).Child("name"), "aname", "must have a unique name"), + }, + }, + { + name: "negative token inactivity timeout", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeout: &metav1.Duration{Duration: -50 * time.Second}, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "tokenConfig", "accessTokenInactivityTimeout"), metav1.Duration{Duration: -50 * time.Second}, fmt.Sprintf("the minimum acceptable token timeout value is 
%d seconds", MinimumInactivityTimeoutSeconds)), + }, + }, + { + name: "positive token inactivity timeout", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeout: &metav1.Duration{Duration: 32578 * time.Second}, + }, + }, + }, + }, + { + name: "zero token inactivity timeout", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeout: &metav1.Duration{Duration: 0}, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "tokenConfig", "accessTokenInactivityTimeout"), metav1.Duration{Duration: 0 * time.Second}, fmt.Sprintf("the minimum acceptable token timeout value is %d seconds", MinimumInactivityTimeoutSeconds)), + }, + }, + { + name: "token inactivity timeout lower than the api constant minimum", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeout: &metav1.Duration{Duration: 250 * time.Second}, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "tokenConfig", "accessTokenInactivityTimeout"), metav1.Duration{Duration: 250 * time.Second}, fmt.Sprintf("the minimum acceptable token timeout value is %d seconds", MinimumInactivityTimeoutSeconds)), + }, + }, + { + name: "negative token max age", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenMaxAgeSeconds: -20, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "tokenConfig", "accessTokenMaxAgeSeconds"), -20, "must be a positive integer or 0"), + }, + }, + { + name: "positive token max age", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenMaxAgeSeconds: 213123, + }, + }, + }, + }, + { + name: "zero token max age", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenMaxAgeSeconds: 0, + }, + }, + }, + }, + { + name: "template names all messed up", + args: args{ + spec: configv1.OAuthSpec{ + Templates: configv1.OAuthTemplates{ + Login: configv1.SecretNameReference{Name: "/this/is/wrong.html"}, + ProviderSelection: configv1.SecretNameReference{Name: "also_wrong"}, + Error: configv1.SecretNameReference{Name: "the&very+woRst"}, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "templates", "login", "name"), "/this/is/wrong.html", wrongConfigMapSecretErrMsg), + field.Invalid(field.NewPath("spec", "templates", "providerSelection", "name"), "also_wrong", wrongConfigMapSecretErrMsg), + field.Invalid(field.NewPath("spec", "templates", "error", "name"), "the&very+woRst", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "everything set properly", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "some_httpasswd", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "sso", + IdentityProviderConfig: requestHeaderIDP(false, true), + }, + }, + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeout: &metav1.Duration{Duration: 300 * time.Second}, + AccessTokenMaxAgeSeconds: 216000, + }, + Templates: configv1.OAuthTemplates{ + Login: configv1.SecretNameReference{Name: "my-login-template"}, + ProviderSelection: configv1.SecretNameReference{Name: "provider-selection.template"}, + Error: configv1.SecretNameReference{Name: "a.template-with-error"}, + }, + }, + }, + }, + { + name: "two different IdPs in one object", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + 
{ + Name: "bad_bad_config", + IdentityProviderConfig: doubledIdPs, + }, + }, + TokenConfig: configv1.TokenConfig{ + AccessTokenMaxAgeSeconds: 216000, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "identityProviders").Index(0), doubledIdPs, "only one identity provider can be configured in single object"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := validateOAuthSpec(tt.args.spec) + + // DeepEqual does not seem to be working well here + var failedCheck bool + if len(got) != len(tt.want) { + failedCheck = true + } else { + // Check all the errors + for i := range got { + if got[i].Error() != tt.want[i].Error() { + failedCheck = true + break + } + } + } + + if failedCheck { + t.Errorf("validateOAuthSpec() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestValidateIdentityProvider(t *testing.T) { + type args struct { + identityProvider configv1.IdentityProvider + fldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "empty provider needs at least name and type in provider", + args: args{ + identityProvider: configv1.IdentityProvider{}, + }, + want: field.ErrorList{ + field.Required(field.NewPath("name"), ""), + field.Required(field.NewPath("type"), ""), + }, + }, + { + name: "unknown type name", + args: args{ + identityProvider: configv1.IdentityProvider{ + Name: "providingProvider", + IdentityProviderConfig: configv1.IdentityProviderConfig{ + Type: "someText", + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("type"), "someText", "not a valid provider type"), + }, + }, + { + name: "basic provider", + args: args{ + identityProvider: configv1.IdentityProvider{ + Name: "providingProvider", + IdentityProviderConfig: htpasswdIDP(), + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ValidateIdentityProvider(tt.args.identityProvider, tt.args.fldPath) + // DeepEqual does not seem to be working well here + var failedCheck bool + if len(got) != len(tt.want) { + failedCheck = true + } else { + // Check all the errors + for i := range got { + if got[i].Error() != tt.want[i].Error() { + failedCheck = true + break + } + } + } + + if failedCheck { + t.Errorf("ValidateIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestValidateOAuthIdentityProvider(t *testing.T) { + type args struct { + clientID string + clientSecretRef configv1.SecretNameReference + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "empty client ID and secret ref", + args: args{ + clientID: "", + clientSecretRef: configv1.SecretNameReference{}, + }, + want: field.ErrorList{ + field.Required(field.NewPath("clientID"), ""), + field.Required(field.NewPath("clientSecret", "name"), ""), + }, + }, + { + name: "improper client secret refname", + args: args{ + clientID: "thisBeClient", + clientSecretRef: configv1.SecretNameReference{Name: "terribleName_forASecret"}, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("clientSecret", "name"), "terribleName_forASecret", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "working example", + args: args{ + clientID: "thisBeClient", + clientSecretRef: configv1.SecretNameReference{Name: "client-secret-hideout"}, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateOAuthIdentityProvider(tt.args.clientID, tt.args.clientSecretRef, 
tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateOAuthIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone.go new file mode 100644 index 0000000000000..e1bf7cb76aed2 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone.go @@ -0,0 +1,23 @@ +package oauth + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" +) + +func ValidateKeystoneIdentityProvider(provider *configv1.KeystoneIdentityProvider, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if provider == nil { + errs = append(errs, field.Required(fldPath, "")) + return errs + } + + errs = append(errs, ValidateRemoteConnectionInfo(provider.OAuthRemoteConnectionInfo, fldPath)...) + + if len(provider.DomainName) == 0 { + errs = append(errs, field.Required(field.NewPath("domainName"), "")) + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone_test.go new file mode 100644 index 0000000000000..6ccdddb7b9ebf --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone_test.go @@ -0,0 +1,96 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func keystoneIdP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeKeystone, + Keystone: &configv1.KeystoneIdentityProvider{ + OAuthRemoteConnectionInfo: configv1.OAuthRemoteConnectionInfo{ + URL: "https://somewhere.over.rainbow/ks", + CA: configv1.ConfigMapNameReference{Name: "govt-ca"}, + }, + DomainName: "production", + }, + } +} + +func TestValidateKeystoneIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.KeystoneIdentityProvider + fldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "empty url", + args: args{ + provider: &configv1.KeystoneIdentityProvider{ + OAuthRemoteConnectionInfo: configv1.OAuthRemoteConnectionInfo{ + URL: "", + }, + DomainName: "production", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("url"), ""), + }, + }, + { + name: "http url", + args: args{ + provider: &configv1.KeystoneIdentityProvider{ + OAuthRemoteConnectionInfo: configv1.OAuthRemoteConnectionInfo{ + URL: "http://foo", + }, + DomainName: "production", + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("url"), "http://foo", "must use https scheme"), + }, + }, + { + name: "missing domain name", + args: args{ + provider: &configv1.KeystoneIdentityProvider{ + OAuthRemoteConnectionInfo: configv1.OAuthRemoteConnectionInfo{ + URL: "https://keystone.openstack.nasa.gov/", + }, + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("domainName"), ""), + }, + }, + { + name: "working provider", + args: args{ + provider: keystoneIdP().Keystone, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := 
ValidateKeystoneIdentityProvider(tt.args.provider, tt.args.fldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateKeystoneIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap.go new file mode 100644 index 0000000000000..b5f40060b9cc9 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap.go @@ -0,0 +1,66 @@ +package oauth + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/security/ldaputil" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func ValidateLDAPIdentityProvider(provider *configv1.LDAPIdentityProvider, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + + if provider == nil { + errs = append(errs, field.Required(fldPath, "")) + return errs + } + + errs = append(errs, validateLDAPClientConfig(provider.URL, provider.BindDN, provider.BindPassword.Name, provider.CA.Name, provider.Insecure, fldPath)...) + errs = append(errs, crvalidation.ValidateSecretReference(fldPath.Child("bindPassword"), provider.BindPassword, false)...) + errs = append(errs, crvalidation.ValidateConfigMapReference(fldPath.Child("ca"), provider.CA, false)...) + + // At least one attribute to use as the user id is required + if len(provider.Attributes.ID) == 0 { + errs = append(errs, field.Invalid(fldPath.Child("attributes", "id"), "[]", "at least one id attribute is required (LDAP standard identity attribute is 'dn')")) + } + + return errs +} + +// TODO clean this up +func validateLDAPClientConfig(url, bindDN, bindPasswordRef, CA string, insecure bool, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + + // Make sure bindDN and bindPassword are both set, or both unset + // Both unset means an anonymous bind is used for search (https://tools.ietf.org/html/rfc4513#section-5.1.1) + // Both set means the name/password simple bind is used for search (https://tools.ietf.org/html/rfc4513#section-5.1.3) + if (len(bindDN) == 0) != (len(bindPasswordRef) == 0) { + errs = append(errs, field.Invalid(fldPath.Child("bindDN"), bindDN, "bindDN and bindPassword must both be specified, or both be empty")) + errs = append(errs, field.Invalid(fldPath.Child("bindPassword").Child("name"), bindPasswordRef, "bindDN and bindPassword must both be specified, or both be empty")) + } + + if len(url) == 0 { + errs = append(errs, field.Required(fldPath.Child("url"), "")) + return errs + } + + u, err := ldaputil.ParseURL(url) + if err != nil { + errs = append(errs, field.Invalid(fldPath.Child("url"), url, err.Error())) + return errs + } + + if insecure { + if u.Scheme == ldaputil.SchemeLDAPS { + errs = append(errs, field.Invalid(fldPath.Child("url"), url, fmt.Sprintf("Cannot use %s scheme with insecure=true", u.Scheme))) + } + if len(CA) > 0 { + errs = append(errs, field.Invalid(fldPath.Child("ca"), CA, "Cannot specify a ca with insecure=true")) + } + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap_test.go new file mode 100644 index 0000000000000..85daa9e182541 --- /dev/null +++ 
b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap_test.go @@ -0,0 +1,101 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func ldapIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeLDAP, + LDAP: &configv1.LDAPIdentityProvider{ + Attributes: configv1.LDAPAttributeMapping{ + ID: []string{"memberUid"}, + }, + BindDN: "uid=readallaccount,ou=privileged,dc=coolpeople,dc=se", + BindPassword: configv1.SecretNameReference{ + Name: "ldap-secret", + }, + CA: configv1.ConfigMapNameReference{Name: "ldap-ca-configmap"}, + Insecure: false, + URL: "ldaps://ldapinstance.corporate.coolpeople.se/ou=Groups,dc=coolpeople,dc=se?memberUid?sub", + }, + } +} + +func TestValidateLDAPIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.LDAPIdentityProvider + fldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "invalid bindPassword ref name, missing ID", + args: args{ + provider: &configv1.LDAPIdentityProvider{ + BindPassword: configv1.SecretNameReference{Name: "bad_refname"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("bindDN"), "", "bindDN and bindPassword must both be specified, or both be empty"), + field.Invalid(field.NewPath("bindPassword", "name"), "bad_refname", "bindDN and bindPassword must both be specified, or both be empty"), + field.Required(field.NewPath("url"), ""), + field.Invalid(field.NewPath("bindPassword", "name"), "bad_refname", wrongConfigMapSecretErrMsg), + field.Invalid(field.NewPath("attributes", "id"), "[]", "at least one id attribute is required (LDAP standard identity attribute is 'dn')"), + }, + }, + { + name: "invalid url", + args: args{ + provider: &configv1.LDAPIdentityProvider{ + URL: "https://foo", + Attributes: configv1.LDAPAttributeMapping{ + ID: []string{"uid"}, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("url"), "https://foo", `invalid scheme "https"`), + }, + }, + { + name: "minimal passing provider", + args: args{ + provider: &configv1.LDAPIdentityProvider{ + URL: "ldap://foo", + Attributes: configv1.LDAPAttributeMapping{ + ID: []string{"uid"}, + }, + }, + }, + want: field.ErrorList{}, + }, + { + name: "more complicated use", + args: args{ + provider: ldapIDP().LDAP, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateLDAPIdentityProvider(tt.args.provider, tt.args.fldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateLDAPIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_oauth.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_oauth.go new file mode 100644 index 0000000000000..eec9bf57532e2 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_oauth.go @@ -0,0 +1,111 @@ +package oauth + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + crvalidation 
"k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateOAuth" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return crvalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.GroupVersion.WithResource("oauths").GroupResource(): true, + }, + map[schema.GroupVersionKind]crvalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("OAuth"): oauthV1{}, + }) + }) +} + +func toOAuthV1(uncastObj runtime.Object) (*configv1.OAuth, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + errs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.OAuth) + if !ok { + return nil, append(errs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"OAuth"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type oauthV1 struct{} + +func (oauthV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toOAuthV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, crvalidation.RequireNameCluster, field.NewPath("metadata"))...) + errs = append(errs, validateOAuthSpecCreate(obj.Spec)...) + + return errs +} + +func (oauthV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toOAuthV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toOAuthV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateOAuthSpecUpdate(obj.Spec, oldObj.Spec)...) + + return errs +} + +func (oauthV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toOAuthV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toOAuthV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateOAuthStatus(obj.Status)...) 
+ + return errs +} + +func validateOAuthSpecCreate(spec configv1.OAuthSpec) field.ErrorList { + return validateOAuthSpec(spec) +} + +func validateOAuthSpecUpdate(newspec, oldspec configv1.OAuthSpec) field.ErrorList { + return validateOAuthSpec(newspec) +} + +func validateOAuthStatus(status configv1.OAuthStatus) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid.go new file mode 100644 index 0000000000000..41d8c35db3f91 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid.go @@ -0,0 +1,54 @@ +package oauth + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/validation" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func ValidateOpenIDIdentityProvider(provider *configv1.OpenIDIdentityProvider, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if provider == nil { + allErrs = append(allErrs, field.Required(fieldPath, "")) + return allErrs + } + + allErrs = append(allErrs, ValidateOAuthIdentityProvider(provider.ClientID, provider.ClientSecret, fieldPath)...) + + if provider.Issuer != strings.TrimRight(provider.Issuer, "/") { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("issuer"), provider.Issuer, "cannot end with '/'")) + } + + // The specs are a bit ambiguous on whether this must or needn't be an https:// + // scheme, but they do require (MUST) TLS support for the discovery and we do + // require this in our API description + // https://openid.net/specs/openid-connect-discovery-1_0.html#TLSRequirements + url, issuerErrs := validation.ValidateSecureURL(provider.Issuer, fieldPath.Child("issuer")) + allErrs = append(allErrs, issuerErrs...) + if len(url.RawQuery) > 0 || len(url.Fragment) > 0 { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("issuer"), provider.Issuer, "must not specify query or fragment component")) + } + + allErrs = append(allErrs, crvalidation.ValidateConfigMapReference(fieldPath.Child("ca"), provider.CA, false)...) + + for i, scope := range provider.ExtraScopes { + // https://tools.ietf.org/html/rfc6749#section-3.3 (full list of allowed chars is %x21 / %x23-5B / %x5D-7E) + // for those without an ascii table, that's `!`, `#-[`, `]-~` inclusive.
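// To make that range concrete: among printable ASCII, only space (%x20),
// double quote (%x22) and backslash (%x5C) fall outside it, so a scope such
// as `read all` (embedded space) fails the check below, while scopes like
// "openid", "profile" or "read:user" pass.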
+ for _, ch := range scope { + switch { + case ch == '!': + case ch >= '#' && ch <= '[': + case ch >= ']' && ch <= '~': + default: + allErrs = append(allErrs, field.Invalid(fieldPath.Child("extraScopes").Index(i), scope, fmt.Sprintf("cannot contain %v", ch))) + } + } + } + + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid_test.go new file mode 100644 index 0000000000000..2c243bcccaa47 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid_test.go @@ -0,0 +1,125 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func openidIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeOpenID, + OpenID: &configv1.OpenIDIdentityProvider{ + ClientID: "readallPerson", + ClientSecret: configv1.SecretNameReference{Name: "oidc-secret"}, + Issuer: "https://oidc-friendly.domain.com", + CA: configv1.ConfigMapNameReference{Name: "oidc-ca"}, + ExtraScopes: []string{"email", "profile"}, + ExtraAuthorizeParameters: map[string]string{ + "include_granted_scopes": "true", + }, + Claims: configv1.OpenIDClaims{ + PreferredUsername: []string{"full_name", "email"}, + Email: []string{"email"}, + }, + }, + } +} + +func TestValidateOpenIDIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.OpenIDIdentityProvider + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "missing client ID and secret", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + Issuer: "https://bigcorp.oidc.com", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("clientID"), ""), + field.Required(field.NewPath("clientSecret", "name"), ""), + }, + }, + { + name: "missing issuer", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + ClientID: "readallPerson", + ClientSecret: configv1.SecretNameReference{Name: "oidc-secret"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("issuer"), "", "must contain a scheme (e.g. 
https://)"), + field.Invalid(field.NewPath("issuer"), "", "must contain a host"), + }, + }, + { + name: "issuer with http:// scheme", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + ClientID: "gentleDolphin", + ClientSecret: configv1.SecretNameReference{Name: "seemsliggit"}, + Issuer: "http://oidc-friendly.domain.com", + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("issuer"), "http://oidc-friendly.domain.com", "must use https scheme"), + }, + }, + { + name: "bad CA refname", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + ClientID: "readallPerson", + ClientSecret: configv1.SecretNameReference{Name: "oidc-secret"}, + Issuer: "https://oidc-friendly.domain.com", + CA: configv1.ConfigMapNameReference{Name: "the_Nameofaca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("ca", "name"), "the_Nameofaca", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "minimal working example", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + ClientID: "readallPerson", + ClientSecret: configv1.SecretNameReference{Name: "oidc-secret"}, + Issuer: "https://oidc-friendly.domain.com", + }, + }, + want: field.ErrorList{}, + }, + { + name: "more complicated use", + args: args{ + provider: openidIDP().OpenID, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateOpenIDIdentityProvider(tt.args.provider, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateOpenIDIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader.go new file mode 100644 index 0000000000000..93b7c5844cd4f --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader.go @@ -0,0 +1,85 @@ +package oauth + +import ( + "fmt" + "net/url" + "path" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/validation" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const ( + // URLToken in the query of the redirectURL gets replaced with the original request URL, escaped as a query parameter. + // Example use: https://www.example.com/login?then=${url} + urlToken = "${url}" + + // QueryToken in the query of the redirectURL gets replaced with the original request URL, unescaped. + // Example use: https://www.example.com/sso/oauth/authorize?${query} + queryToken = "${query}" +) + +func ValidateRequestHeaderIdentityProvider(provider *configv1.RequestHeaderIdentityProvider, fieldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if provider == nil { + errs = append(errs, field.Required(fieldPath, "")) + return errs + } + + errs = append(errs, crvalidation.ValidateConfigMapReference(fieldPath.Child("ca"), provider.ClientCA, true)...) 
+ + if len(provider.Headers) == 0 { + errs = append(errs, field.Required(fieldPath.Child("headers"), "")) + } + + if len(provider.ChallengeURL) == 0 && len(provider.LoginURL) == 0 { + errs = append(errs, field.Required(fieldPath, "at least one of challengeURL or loginURL must be specified")) + } + + if len(provider.ChallengeURL) > 0 { + u, urlErrs := validation.ValidateURL(provider.ChallengeURL, fieldPath.Child("challengeURL")) + errs = append(errs, urlErrs...) + if len(urlErrs) == 0 { + if !hasParamToken(u) { + errs = append(errs, + field.Invalid(field.NewPath("challengeURL"), provider.ChallengeURL, + fmt.Sprintf("query does not include %q or %q, redirect will not preserve original authorize parameters", urlToken, queryToken)), + ) + } + } + } + + if len(provider.LoginURL) > 0 { + u, urlErrs := validation.ValidateURL(provider.LoginURL, fieldPath.Child("loginURL")) + errs = append(errs, urlErrs...) + if len(urlErrs) == 0 { + if !hasParamToken(u) { + errs = append(errs, + field.Invalid(fieldPath.Child("loginURL"), provider.LoginURL, + fmt.Sprintf("query does not include %q or %q, redirect will not preserve original authorize parameters", urlToken, queryToken), + ), + ) + } + if strings.HasSuffix(u.Path, "/") { + errs = append(errs, + field.Invalid(fieldPath.Child("loginURL"), provider.LoginURL, `path ends with "/", grant approval flows will not function correctly`), + ) + } + if _, file := path.Split(u.Path); file != "authorize" { + errs = append(errs, + field.Invalid(fieldPath.Child("loginURL"), provider.LoginURL, `path does not end with "/authorize", grant approval flows will not function correctly`), + ) + } + } + } + + return errs +} + +func hasParamToken(u *url.URL) bool { + return strings.Contains(u.RawQuery, urlToken) || strings.Contains(u.RawQuery, queryToken) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader_test.go new file mode 100644 index 0000000000000..44e590f0b2b5e --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader_test.go @@ -0,0 +1,193 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func requestHeaderIDP(challenge, login bool) configv1.IdentityProviderConfig { + var challengeURL, loginURL string + + if challenge { + challengeURL = "https://sso.corporate.coolpeople.se/challenges/oauth/authorize?${query}" + } + if login { + loginURL = "https://sso.corporate.coolpeople.se/loginz/oauth/authorize?${query}" + } + + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeRequestHeader, + RequestHeader: &configv1.RequestHeaderIdentityProvider{ + LoginURL: loginURL, + ChallengeURL: challengeURL, + ClientCA: configv1.ConfigMapNameReference{ + Name: "coolpeople-client-ca", + }, + ClientCommonNames: []string{"authn-proxy"}, + Headers: []string{"X-Remote-User", "SSO-User"}, + NameHeaders: []string{"X-Remote-User-Display-Name"}, + }, + } +} + +func TestValidateRequestHeaderIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.RequestHeaderIdentityProvider + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "empty provider", + args: args{ + provider: 
&configv1.RequestHeaderIdentityProvider{}, + }, + want: field.ErrorList{ + field.Required(field.NewPath("ca", "name"), ""), + field.Required(field.NewPath("headers"), ""), + {Type: field.ErrorTypeRequired, Field: "", BadValue: "", Detail: "at least one of challengeURL or loginURL must be specified"}, + }, + }, + { + name: "wrong ca refname", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + LoginURL: "http://oauth.coolpeoplecorp.com/login/authorize?${query}", + Headers: []string{"X-Remote-User"}, + ClientCA: configv1.ConfigMapNameReference{Name: "dat_badrefname"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("ca", "name"), "dat_badrefname", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "challenge url without query, no client CA set", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + ChallengeURL: "http://oauth.coolpeoplecorp.com/challenge-endpoint", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("ca", "name"), ""), + field.Invalid(field.NewPath("challengeURL"), "http://oauth.coolpeoplecorp.com/challenge-endpoint", "query does not include \"${url}\" or \"${query}\", redirect will not preserve original authorize parameters"), + }, + }, + { + name: "challenge url with query - no ${url}, ${query}", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + ChallengeURL: "http://oauth.coolpeoplecorp.com/challenge-endpoint?${sender}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("challengeURL"), "http://oauth.coolpeoplecorp.com/challenge-endpoint?${sender}", "query does not include \"${url}\" or \"${query}\", redirect will not preserve original authorize parameters"), + }, + }, + { + name: "challenge url with query - ${url}", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + ChallengeURL: "http://oauth.coolpeoplecorp.com/challenge-endpoint?${url}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{}, + }, + { + name: "login url without query and authorize", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/challenge-endpoint", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/challenge-endpoint", "query does not include \"${url}\" or \"${query}\", redirect will not preserve original authorize parameters"), + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/challenge-endpoint", "path does not end with \"/authorize\", grant approval flows will not function correctly"), + }, + }, + { + name: "login url with query - no ${url}, ${query} - no client CA set", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/login-endpoint/authorize?${custom}", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("ca", "name"), ""), + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/login-endpoint/authorize?${custom}", "query does not include \"${url}\" or \"${query}\", redirect will not preserve original authorize parameters"), + }, + }, + { + name: "login url with query - ${query} - no 
/authorize", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/login-endpoint?${query}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/login-endpoint?${query}", "path does not end with \"/authorize\", grant approval flows will not function correctly"), + }, + }, + { + name: "login url with query - ${query} - ends with /", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/login-endpoint/authorize/?${query}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/login-endpoint/authorize/?${query}", "path ends with \"/\", grant approval flows will not function correctly"), + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/login-endpoint/authorize/?${query}", "path does not end with \"/authorize\", grant approval flows will not function correctly"), + }, + }, + { + name: "login url with query - ${query}", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/login-endpoint/authorize?${query}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{}, + }, + { + name: "more complicated use", + args: args{ + provider: requestHeaderIDP(true, true).RequestHeader, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateRequestHeaderIdentityProvider(tt.args.provider, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateRequestHeaderIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource.go b/openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource.go new file mode 100644 index 0000000000000..f4cb78543ccef --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource.go @@ -0,0 +1,52 @@ +package operator + +import ( + "context" + "fmt" + "io" + + "k8s.io/apiserver/pkg/admission" +) + +const PluginName = "operator.openshift.io/DenyDeleteClusterOperators" + +// Register registers an admission plugin factory whose plugin prevents the deletion of cluster operator resources. +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return newAdmissionPlugin(), nil + }) +} + +var _ admission.ValidationInterface = &admissionPlugin{} + +type admissionPlugin struct { + *admission.Handler +} + +func newAdmissionPlugin() *admissionPlugin { + return &admissionPlugin{Handler: admission.NewHandler(admission.Delete)} +} + +// Validate returns an error if there is an attempt to delete a cluster operator resource. 
+func (p *admissionPlugin) Validate(ctx context.Context, attributes admission.Attributes, _ admission.ObjectInterfaces) error { + if len(attributes.GetSubresource()) > 0 { + return nil + } + if attributes.GetResource().Group != "operator.openshift.io" { + return nil + } + switch attributes.GetResource().Resource { + // Deletion is denied for storages.operator.openshift.io objects named cluster, + // because MCO and KCM-O depend on this resource being present in order to + // correctly set environment variables on kubelet and kube-controller-manager. + case "storages": + if attributes.GetName() != "cluster" { + return nil + } + // Deletion is allowed for all other operator.openshift.io objects unless + // explicitly listed above. + default: + return nil + } + return admission.NewForbidden(attributes, fmt.Errorf("deleting required %s.%s resource, named %s, is not allowed", attributes.GetResource().Resource, attributes.GetResource().Group, attributes.GetName())) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource_test.go new file mode 100644 index 0000000000000..6b0eaa5cc911d --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource_test.go @@ -0,0 +1,73 @@ +package operator + +import ( + "context" + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +func TestAdmissionPlugin_Validate(t *testing.T) { + testCases := []struct { + tcName string + group string + resource string + name string + denyDelete bool + }{ + { + tcName: "NotBlackListedResourceNamedCluster", + group: "operator.openshift.io", + resource: "notBlacklisted", + name: "cluster", + denyDelete: false, + }, + { + tcName: "NotBlackListedResourceNamedNotCluster", + group: "operator.openshift.io", + resource: "notBlacklisted", + name: "notCluster", + denyDelete: false, + }, + { + tcName: "StorageResourceNamedCluster", + group: "operator.openshift.io", + resource: "storages", + name: "cluster", + denyDelete: true, + }, + { + tcName: "StorageResourceNamedNotCluster", + group: "operator.openshift.io", + resource: "storages", + name: "notCluster", + denyDelete: false, + }, + { + tcName: "ClusterVersionNotVersion", + group: "config.openshift.io", + resource: "clusterversions", + name: "instance", + denyDelete: false, + }, + { + tcName: "OtherGroup", + group: "not.operator.openshift.io", + resource: "notBlacklisted", + name: "cluster", + denyDelete: false, + }, + } + for _, tc := range testCases { + t.Run(tc.tcName, func(t *testing.T) { + err := newAdmissionPlugin().Validate(context.TODO(), admission.NewAttributesRecord( + nil, nil, schema.GroupVersionKind{}, "", + tc.name, schema.GroupVersionResource{Group: tc.group, Resource: tc.resource}, + "", admission.Delete, nil, false, nil), nil) + if tc.denyDelete != (err != nil) { + t.Error(tc.denyDelete, err) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/project/validate_project.go b/openshift-kube-apiserver/admission/customresourcevalidation/project/validate_project.go new file mode 100644 index 0000000000000..d0e1af58ab999 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/project/validate_project.go @@ -0,0 +1,112 @@ +package project + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + 
"k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + validationutil "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateProject" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("projects"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Project"): projectV1{}, + }) + }) +} + +func toProjectV1(uncastObj runtime.Object) (*configv1.Project, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Project) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Project"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type projectV1 struct { +} + +func validateProjectSpec(spec configv1.ProjectSpec) field.ErrorList { + allErrs := field.ErrorList{} + + if len(spec.ProjectRequestMessage) > 4096 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.projectRequestMessage"), spec, validationutil.MaxLenError(4096))) + } + + if name := spec.ProjectRequestTemplate.Name; len(name) > 0 { + for _, msg := range validation.NameIsDNSSubdomain(spec.ProjectRequestTemplate.Name, false) { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.projectRequestTemplate.name"), name, msg)) + } + } + + return allErrs +} + +func (projectV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toProjectV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateProjectSpec(obj.Spec)...) + + return allErrs +} + +func (projectV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toProjectV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toProjectV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateProjectSpec(obj.Spec)...) + + return allErrs +} + +func (projectV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toProjectV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toProjectV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) 
+ + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validate_rbr.go b/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validate_rbr.go new file mode 100644 index 0000000000000..28d4958db9490 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validate_rbr.go @@ -0,0 +1,84 @@ +package rolebindingrestriction + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + authorizationv1 "github.com/openshift/api/authorization/v1" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + rbrvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation" +) + +const PluginName = "authorization.openshift.io/ValidateRoleBindingRestriction" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + {Group: authorizationv1.GroupName, Resource: "rolebindingrestrictions"}: true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + authorizationv1.GroupVersion.WithKind("RoleBindingRestriction"): roleBindingRestrictionV1{}, + }) + }) +} + +func toRoleBindingRestriction(uncastObj runtime.Object) (*authorizationv1.RoleBindingRestriction, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*authorizationv1.RoleBindingRestriction) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"RoleBindingRestriction"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{authorizationv1.GroupVersion.String()})) + } + + return obj, nil +} + +type roleBindingRestrictionV1 struct { +} + +func (roleBindingRestrictionV1) ValidateCreate(_ context.Context, obj runtime.Object) field.ErrorList { + roleBindingRestrictionObj, errs := toRoleBindingRestriction(obj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&roleBindingRestrictionObj.ObjectMeta, true, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, rbrvalidation.ValidateRoleBindingRestriction(roleBindingRestrictionObj)...) + + return errs +} + +func (roleBindingRestrictionV1) ValidateUpdate(_ context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + roleBindingRestrictionObj, errs := toRoleBindingRestriction(obj) + if len(errs) > 0 { + return errs + } + roleBindingRestrictionOldObj, errs := toRoleBindingRestriction(oldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&roleBindingRestrictionObj.ObjectMeta, true, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, rbrvalidation.ValidateRoleBindingRestrictionUpdate(roleBindingRestrictionObj, roleBindingRestrictionOldObj)...) 
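Since the update path reuses the same spec validation as create, the exactly-one-restriction rule enforced by the validation package below applies to both. A sketch of an object that rule rejects; the names here are illustrative:

```go
package main

import (
	"fmt"

	authorizationv1 "github.com/openshift/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Setting both userrestriction and grouprestriction violates the
	// "exactly one of userrestriction, grouprestriction, or
	// serviceaccountrestriction" rule and fails admission.
	rbr := &authorizationv1.RoleBindingRestriction{
		ObjectMeta: metav1.ObjectMeta{Name: "match-users-and-groups", Namespace: "demo"},
		Spec: authorizationv1.RoleBindingRestrictionSpec{
			UserRestriction:  &authorizationv1.UserRestriction{Users: []string{"alice"}},
			GroupRestriction: &authorizationv1.GroupRestriction{Groups: []string{"devs"}},
		},
	}
	fmt.Printf("%+v\n", rbr.Spec)
}
```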
+ + return errs +} + +func (r roleBindingRestrictionV1) ValidateStatusUpdate(ctx context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + return r.ValidateUpdate(ctx, obj, oldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation/validation.go b/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation/validation.go new file mode 100644 index 0000000000000..e93824220d7fb --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation/validation.go @@ -0,0 +1,115 @@ +package validation + +import ( + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core/validation" + + authorizationv1 "github.com/openshift/api/authorization/v1" +) + +func ValidateRoleBindingRestriction(rbr *authorizationv1.RoleBindingRestriction) field.ErrorList { + allErrs := validation.ValidateObjectMeta(&rbr.ObjectMeta, true, + apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata")) + + allErrs = append(allErrs, + ValidateRoleBindingRestrictionSpec(&rbr.Spec, field.NewPath("spec"))...) + + return allErrs +} + +func ValidateRoleBindingRestrictionUpdate(rbr, old *authorizationv1.RoleBindingRestriction) field.ErrorList { + allErrs := ValidateRoleBindingRestriction(rbr) + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&rbr.ObjectMeta, + &old.ObjectMeta, field.NewPath("metadata"))...) + + return allErrs +} + +func ValidateRoleBindingRestrictionSpec(spec *authorizationv1.RoleBindingRestrictionSpec, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + const invalidMsg = `must specify exactly one of userrestriction, grouprestriction, or serviceaccountrestriction` + + if spec.UserRestriction != nil { + if spec.GroupRestriction != nil { + allErrs = append(allErrs, field.Invalid(fld.Child("grouprestriction"), + "both userrestriction and grouprestriction specified", invalidMsg)) + } + if spec.ServiceAccountRestriction != nil { + allErrs = append(allErrs, + field.Invalid(fld.Child("serviceaccountrestriction"), + "both userrestriction and serviceaccountrestriction specified", invalidMsg)) + } + } else if spec.GroupRestriction != nil { + if spec.ServiceAccountRestriction != nil { + allErrs = append(allErrs, + field.Invalid(fld.Child("serviceaccountrestriction"), + "both grouprestriction and serviceaccountrestriction specified", invalidMsg)) + } + } else if spec.ServiceAccountRestriction == nil { + allErrs = append(allErrs, field.Required(fld.Child("userrestriction"), + invalidMsg)) + } + + if spec.UserRestriction != nil { + allErrs = append(allErrs, ValidateRoleBindingRestrictionUser(spec.UserRestriction, fld.Child("userrestriction"))...) + } + if spec.GroupRestriction != nil { + allErrs = append(allErrs, ValidateRoleBindingRestrictionGroup(spec.GroupRestriction, fld.Child("grouprestriction"))...) + } + if spec.ServiceAccountRestriction != nil { + allErrs = append(allErrs, ValidateRoleBindingRestrictionServiceAccount(spec.ServiceAccountRestriction, fld.Child("serviceaccountrestriction"))...) 
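The per-restriction helpers that follow hand selector checking off to apimachinery's generic label selector validation. A standalone sketch of the sort of mistake it reports, using a deliberately invalid selector:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// An In requirement must carry at least one value, so this selector
	// produces a validation error.
	sel := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "team", Operator: metav1.LabelSelectorOpIn},
		},
	}
	errs := metav1validation.ValidateLabelSelector(
		sel,
		metav1validation.LabelSelectorValidationOptions{},
		field.NewPath("spec", "grouprestriction", "selector").Index(0),
	)
	for _, e := range errs {
		fmt.Println(e)
	}
}
```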
+ } + + return allErrs +} + +func ValidateRoleBindingRestrictionUser(user *authorizationv1.UserRestriction, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + const invalidMsg = `must specify at least one user, group, or label selector` + + if !(len(user.Users) > 0 || len(user.Groups) > 0 || len(user.Selectors) > 0) { + allErrs = append(allErrs, field.Required(fld.Child("users"), invalidMsg)) + } + + for i, selector := range user.Selectors { + allErrs = append(allErrs, + unversionedvalidation.ValidateLabelSelector(&selector, + unversionedvalidation.LabelSelectorValidationOptions{}, + fld.Child("selector").Index(i))...) + } + + return allErrs +} + +func ValidateRoleBindingRestrictionGroup(group *authorizationv1.GroupRestriction, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + const invalidMsg = `must specify at least one group or label selector` + + if !(len(group.Groups) > 0 || len(group.Selectors) > 0) { + allErrs = append(allErrs, field.Required(fld.Child("groups"), invalidMsg)) + } + + for i, selector := range group.Selectors { + allErrs = append(allErrs, + unversionedvalidation.ValidateLabelSelector(&selector, + unversionedvalidation.LabelSelectorValidationOptions{}, + fld.Child("selector").Index(i))...) + } + + return allErrs +} + +func ValidateRoleBindingRestrictionServiceAccount(sa *authorizationv1.ServiceAccountRestriction, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + const invalidMsg = `must specify at least one service account or namespace` + + if !(len(sa.ServiceAccounts) > 0 || len(sa.Namespaces) > 0) { + allErrs = append(allErrs, + field.Required(fld.Child("serviceaccounts"), invalidMsg)) + } + + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/default_route.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/default_route.go new file mode 100644 index 0000000000000..74608f2cf2eb9 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/default_route.go @@ -0,0 +1,65 @@ +package route + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + + v1 "github.com/openshift/api/route/v1" +) + +const ( + DefaultingPluginName = "route.openshift.io/DefaultRoute" +) + +func RegisterDefaulting(plugins *admission.Plugins) { + plugins.Register(DefaultingPluginName, func(_ io.Reader) (admission.Interface, error) { + return &defaultRoute{ + Handler: admission.NewHandler(admission.Create, admission.Update), + }, nil + }) +} + +type defaultRoute struct { + *admission.Handler +} + +var _ admission.MutationInterface = &defaultRoute{} + +func (a *defaultRoute) Admit(ctx context.Context, attributes admission.Attributes, _ admission.ObjectInterfaces) error { + if attributes.GetResource().GroupResource() != (schema.GroupResource{Group: "route.openshift.io", Resource: "routes"}) { + return nil + } + + if len(attributes.GetSubresource()) > 0 { + return nil + } + + u, ok := attributes.GetObject().(runtime.Unstructured) + if !ok { + // If a request to the resource routes.route.openshift.io is subject to + // kube-apiserver admission, that should imply that the route API is being served as + // CRs and the request body should have been unmarshaled into an unstructured + // object. 
+ return fmt.Errorf("object being admitted is of type %T and does not implement runtime.Unstructured", attributes.GetObject()) + } + + var external v1.Route + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.UnstructuredContent(), &external); err != nil { + return err + } + + SetObjectDefaults_Route(&external) + + content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&external) + if err != nil { + return err + } + u.SetUnstructuredContent(content) + + return nil +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters.go new file mode 100644 index 0000000000000..c174dbcdfca50 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters.go @@ -0,0 +1,28 @@ +package route + +import ( + routev1 "github.com/openshift/api/route/v1" + "github.com/openshift/library-go/pkg/route/defaulting" +) + +// Defaulters defined in github.com/openshift/library-go/pkg/route/defaulting are not recongized by +// codegen (make update). This file MUST contain duplicates of each defaulter function defined in +// library-go, with the body of each function defined here delegating to its library-go +// counterpart. Missing or extra defaulters here will introduce differences between Route as a CRD +// (MicroShift) and Route as an aggregated API of openshift-apiserver. + +func SetDefaults_RouteSpec(obj *routev1.RouteSpec) { + defaulting.SetDefaults_RouteSpec(obj) +} + +func SetDefaults_RouteTargetReference(obj *routev1.RouteTargetReference) { + defaulting.SetDefaults_RouteTargetReference(obj) +} + +func SetDefaults_TLSConfig(obj *routev1.TLSConfig) { + defaulting.SetDefaults_TLSConfig(obj) +} + +func SetDefaults_RouteIngress(obj *routev1.RouteIngress) { + defaulting.SetDefaults_RouteIngress(obj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters_test.go new file mode 100644 index 0000000000000..eff11a27765b7 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters_test.go @@ -0,0 +1,66 @@ +package route + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "io/fs" + "strings" + "testing" + + "k8s.io/apimachinery/pkg/util/sets" +) + +func TestDuplicatedDefaulters(t *testing.T) { + expected, err := findDefaultersInPackage("../../../../vendor/github.com/openshift/library-go/pkg/route/defaulting") + if err != nil { + t.Fatalf("error finding expected manual defaulters: %v", err) + } + + actual, err := findDefaultersInPackage(".") + if err != nil { + t.Fatalf("error finding actual manual defaulters: %v", err) + } + + for _, missing := range expected.Difference(actual).List() { + t.Errorf("missing local duplicate of library-go defaulter %q", missing) + } + + for _, extra := range actual.Difference(expected).List() { + t.Errorf("found local defaulter %q without library-go counterpart", extra) + } +} + +// findDefaultersInPackage parses the source of the Go package at the given path and returns the +// names of all manual defaulter functions it declares. Package function declarations can't be +// enumerated using reflection. 
+func findDefaultersInPackage(path string) (sets.String, error) { + pkgs, err := parser.ParseDir(token.NewFileSet(), path, func(fi fs.FileInfo) bool { + return !strings.HasSuffix(fi.Name(), "_test.go") + }, 0) + if err != nil { + return nil, fmt.Errorf("failed to parse source of package at %q: %v", path, err) + } + if len(pkgs) != 1 { + return nil, fmt.Errorf("expected exactly 1 package for all sources in %q, got %d", path, len(pkgs)) + } + + defaulters := sets.NewString() + for _, pkg := range pkgs { + ast.Inspect(pkg, func(node ast.Node) bool { + switch typed := node.(type) { + case *ast.Package, *ast.File: + return true + case *ast.FuncDecl: + if typed.Recv == nil && strings.HasPrefix(typed.Name.Name, "SetDefaults_") { + defaulters.Insert(typed.Name.Name) + } + return false + default: + return false + } + }) + } + return defaulters, nil +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/doc.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/doc.go new file mode 100644 index 0000000000000..86f4e3954c020 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/doc.go @@ -0,0 +1,4 @@ +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=github.com/openshift/api/route/v1 + +package route diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route.go new file mode 100644 index 0000000000000..ba8f004fb30d6 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route.go @@ -0,0 +1,83 @@ +package route + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + authorizationv1client "k8s.io/client-go/kubernetes/typed/authorization/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + routev1 "github.com/openshift/api/route/v1" + routevalidation "github.com/openshift/library-go/pkg/route/validation" +) + +const PluginName = "route.openshift.io/ValidateRoute" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return NewValidateRoute() + }) +} + +func toRoute(uncastObj runtime.Object) (*routev1.Route, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + obj, ok := uncastObj.(*routev1.Route) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Route"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{routev1.GroupVersion.String()}), + } + } + + return obj, nil +} + +type routeV1 struct { + secretsGetter func() corev1client.SecretsGetter + sarGetter func() authorizationv1client.SubjectAccessReviewsGetter + routeValidationOptsGetter func() RouteValidationOptionGetter +} + +func (r routeV1) ValidateCreate(ctx context.Context, obj runtime.Object) field.ErrorList { + routeObj, errs := toRoute(obj) + if len(errs) > 0 { + return errs + } + + return routevalidation.ValidateRoute(ctx, routeObj, r.sarGetter().SubjectAccessReviews(), r.secretsGetter(), r.routeValidationOptsGetter().GetValidationOptions()) +} + +func (r routeV1) ValidateUpdate(ctx context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + routeObj, errs := toRoute(obj) + if len(errs) > 0 { + return errs + } + + routeOldObj, errs := toRoute(oldObj) + if 
len(errs) > 0 {
+		return errs
+	}
+
+	return routevalidation.ValidateRouteUpdate(ctx, routeObj, routeOldObj, r.sarGetter().SubjectAccessReviews(), r.secretsGetter(), r.routeValidationOptsGetter().GetValidationOptions())
+}
+
+func (routeV1) ValidateStatusUpdate(_ context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList {
+	routeObj, errs := toRoute(obj)
+	if len(errs) > 0 {
+		return errs
+	}
+
+	routeOldObj, errs := toRoute(oldObj)
+	if len(errs) > 0 {
+		return errs
+	}
+
+	return routevalidation.ValidateRouteStatusUpdate(routeObj, routeOldObj)
+}
diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route_test.go
new file mode 100644
index 0000000000000..7b57a56be3ee1
--- /dev/null
+++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route_test.go
@@ -0,0 +1,149 @@
+package route
+
+import (
+	"context"
+	"testing"
+
+	routev1 "github.com/openshift/api/route/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apiserver/pkg/admission"
+	"k8s.io/apiserver/pkg/authentication/user"
+	"k8s.io/client-go/kubernetes/fake"
+)
+
+// setupWithFakeClient is a test-only helper that wires the validator to fake clients.
+func (a *validateCustomResourceWithClient) setupWithFakeClient() {
+	c := fake.NewSimpleClientset()
+	a.secretsGetter = c.CoreV1()
+	a.sarGetter = c.AuthorizationV1()
+	a.routeValidationOptsGetter = NewRouteValidationOpts()
+}
+
+// TestValidateRoutePlugin verifies that the route validation plugin handles admission
+// for the resource {group: api/route/v1, kind: Route} and that the validator
+// conforms to the admission.InitializationValidator interface.
+func TestValidateRoutePlugin(t *testing.T) {
+	plugin, err := NewValidateRoute()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	validator, ok := plugin.(*validateCustomResourceWithClient)
+	if !ok {
+		t.Fatal("could not type cast returned value of NewValidateRoute() into type validateCustomResourceWithClient, " +
+			"perhaps you changed the type in the implementation but not in the tests!")
+	}
+
+	// unit test specific logic as a replacement for routeAdmitter.SetRESTClientConfig(...)
+ validator.setupWithFakeClient() + + // admission.InitializationValidator -> ValidateInitialization() + err = validator.ValidateInitialization() + if err != nil { + t.Fatal(err) + } + + r1 := &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + }, + Spec: routev1.RouteSpec{ + To: routev1.RouteTargetReference{ + Kind: "Service", + Name: "default", + }, + }, + } + r2 := r1.DeepCopy() + + s1 := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + }, + Data: map[string][]byte{}, + } + s2 := s1.DeepCopy() + + testCases := []struct { + description string + + object runtime.Object + oldObject runtime.Object + + kind schema.GroupVersionKind + resource schema.GroupVersionResource + + name string + namespace string + + expectedError bool + }{ + { + description: "route object is passed to admission plugin with scheme routev1.Route", + + object: runtime.Object(r1), + oldObject: runtime.Object(r2), + + kind: routev1.GroupVersion.WithKind("Route"), + resource: routev1.GroupVersion.WithResource("routes"), + + name: r1.Name, + namespace: r1.Namespace, + + expectedError: false, + }, + { + description: "non-route object is passed to admission plugin with scheme corev1.Secret", + + object: runtime.Object(s1), + oldObject: runtime.Object(s2), + + kind: corev1.SchemeGroupVersion.WithKind("Secret"), + resource: corev1.SchemeGroupVersion.WithResource("secrets"), + + name: s1.Name, + namespace: s1.Namespace, + + expectedError: false, + }, + { + description: "non-route object is passed to admission plugin with conflicting scheme routev1.Route", + + object: runtime.Object(s1), + oldObject: runtime.Object(s2), + + kind: routev1.GroupVersion.WithKind("Route"), + resource: routev1.GroupVersion.WithResource("routes"), + + name: s1.Name, + namespace: s1.Namespace, + + expectedError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + + attr := admission.NewAttributesRecord( + tc.object, tc.oldObject, + tc.kind, tc.name, tc.namespace, tc.resource, + "", admission.Create, nil, false, + &user.DefaultInfo{}, + ) + + switch err := validator.Validate(context.Background(), attr, nil); { + case !tc.expectedError && err != nil: + t.Fatalf("admission error not expected, but found %q", err) + case tc.expectedError && err == nil: + t.Fatal("admission error expected, but got nil") + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/validation_opts.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/validation_opts.go new file mode 100644 index 0000000000000..3bbe5c30ad655 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/validation_opts.go @@ -0,0 +1,31 @@ +package route + +import ( + "k8s.io/apiserver/pkg/util/feature" + "k8s.io/component-base/featuregate" + + openshiftfeatures "github.com/openshift/api/features" + routecommon "github.com/openshift/library-go/pkg/route" +) + +type RouteValidationOptionGetter interface { + GetValidationOptions() routecommon.RouteValidationOptions +} + +type RouteValidationOpts struct { + opts routecommon.RouteValidationOptions +} + +var _ RouteValidationOptionGetter = &RouteValidationOpts{} + +func NewRouteValidationOpts() *RouteValidationOpts { + return &RouteValidationOpts{ + opts: routecommon.RouteValidationOptions{ + AllowExternalCertificates: feature.DefaultMutableFeatureGate.Enabled(featuregate.Feature(openshiftfeatures.FeatureGateRouteExternalCertificate)), + }, + } +} + +func (o 
*RouteValidationOpts) GetValidationOptions() routecommon.RouteValidationOptions { + return o.opts +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/validation_wrapper.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/validation_wrapper.go new file mode 100644 index 0000000000000..2f0b733353543 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/validation_wrapper.go @@ -0,0 +1,92 @@ +package route + +import ( + "fmt" + + routev1 "github.com/openshift/api/route/v1" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + authorizationv1client "k8s.io/client-go/kubernetes/typed/authorization/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/rest" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +type validateCustomResourceWithClient struct { + admission.ValidationInterface + + secretsGetter corev1client.SecretsGetter + sarGetter authorizationv1client.SubjectAccessReviewsGetter + routeValidationOptsGetter RouteValidationOptionGetter +} + +func NewValidateRoute() (admission.Interface, error) { + ret := &validateCustomResourceWithClient{} + + delegate, err := customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + routev1.GroupVersion.WithResource("routes").GroupResource(): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + routev1.GroupVersion.WithKind("Route"): routeV1{ + secretsGetter: ret.getSecretsGetter, + sarGetter: ret.getSubjectAccessReviewsGetter, + routeValidationOptsGetter: ret.getRouteValidationOptions, + }, + }) + if err != nil { + return nil, err + } + ret.ValidationInterface = delegate + + return ret, nil +} + +var _ admissionrestconfig.WantsRESTClientConfig = &validateCustomResourceWithClient{} + +func (a *validateCustomResourceWithClient) getSecretsGetter() corev1client.SecretsGetter { + return a.secretsGetter +} + +func (a *validateCustomResourceWithClient) getSubjectAccessReviewsGetter() authorizationv1client.SubjectAccessReviewsGetter { + return a.sarGetter +} + +func (a *validateCustomResourceWithClient) getRouteValidationOptions() RouteValidationOptionGetter { + return a.routeValidationOptsGetter +} + +func (a *validateCustomResourceWithClient) SetRESTClientConfig(restClientConfig rest.Config) { + var err error + + a.secretsGetter, err = corev1client.NewForConfig(&restClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } + + a.sarGetter, err = authorizationv1client.NewForConfig(&restClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } + + a.routeValidationOptsGetter = NewRouteValidationOpts() +} + +func (a *validateCustomResourceWithClient) ValidateInitialization() error { + if a.secretsGetter == nil { + return fmt.Errorf("%s needs a secretsGetter", PluginName) + } + if a.sarGetter == nil { + return fmt.Errorf("%s needs a subjectAccessReviewsGetter", PluginName) + } + if a.routeValidationOptsGetter == nil { + return fmt.Errorf("%s needs a routeValidationOptsGetter", PluginName) + } + + return nil +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/zz_generated.defaults.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/zz_generated.defaults.go new file mode 100644 index 0000000000000..a01f63f328c5f --- /dev/null +++ 
b/openshift-kube-apiserver/admission/customresourcevalidation/route/zz_generated.defaults.go @@ -0,0 +1,59 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package route + +import ( + v1 "github.com/openshift/api/route/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&v1.Route{}, func(obj interface{}) { SetObjectDefaults_Route(obj.(*v1.Route)) }) + scheme.AddTypeDefaultingFunc(&v1.RouteList{}, func(obj interface{}) { SetObjectDefaults_RouteList(obj.(*v1.RouteList)) }) + return nil +} + +func SetObjectDefaults_Route(in *v1.Route) { + SetDefaults_RouteSpec(&in.Spec) + SetDefaults_RouteTargetReference(&in.Spec.To) + for i := range in.Spec.AlternateBackends { + a := &in.Spec.AlternateBackends[i] + SetDefaults_RouteTargetReference(a) + } + if in.Spec.TLS != nil { + SetDefaults_TLSConfig(in.Spec.TLS) + } + for i := range in.Status.Ingress { + a := &in.Status.Ingress[i] + SetDefaults_RouteIngress(a) + } +} + +func SetObjectDefaults_RouteList(in *v1.RouteList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Route(a) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/scheduler/validate_scheduler.go b/openshift-kube-apiserver/admission/customresourcevalidation/scheduler/validate_scheduler.go new file mode 100644 index 0000000000000..dddf0c70209d2 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/scheduler/validate_scheduler.go @@ -0,0 +1,107 @@ +package scheduler + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateScheduler" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("schedulers"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Scheduler"): schedulerV1{}, + }) + }) +} + +func toSchedulerV1(uncastObj runtime.Object) (*configv1.Scheduler, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Scheduler) + if !ok { + return nil, append(allErrs, + 
field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Scheduler"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type schedulerV1 struct { +} + +func validateSchedulerSpec(spec configv1.SchedulerSpec) field.ErrorList { + allErrs := field.ErrorList{} + + if name := spec.Policy.Name; len(name) > 0 { + for _, msg := range validation.NameIsDNSSubdomain(spec.Policy.Name, false) { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.Policy.name"), name, msg)) + } + } + + return allErrs +} + +func (schedulerV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toSchedulerV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateSchedulerSpec(obj.Spec)...) + + return allErrs +} + +func (schedulerV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toSchedulerV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toSchedulerV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateSchedulerSpec(obj.Spec)...) + + return allErrs +} + +func (schedulerV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toSchedulerV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toSchedulerV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) 
+
+	return errs
+}
diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc.go
new file mode 100644
index 0000000000000..1a7193eff7c75
--- /dev/null
+++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc.go
@@ -0,0 +1,93 @@
+package securitycontextconstraints
+
+import (
+	"bytes"
+	"context"
+	"io"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apiserver/pkg/admission"
+
+	securityv1 "github.com/openshift/api/security/v1"
+)
+
+const DefaultingPluginName = "security.openshift.io/DefaultSecurityContextConstraints"
+
+func RegisterDefaulting(plugins *admission.Plugins) {
+	plugins.Register(DefaultingPluginName, func(config io.Reader) (admission.Interface, error) {
+		return NewDefaulter(), nil
+	})
+}
+
+type defaultSCC struct {
+	*admission.Handler
+
+	scheme       *runtime.Scheme
+	codecFactory runtimeserializer.CodecFactory
+}
+
+var _ admission.MutationInterface = &defaultSCC{}
+
+func NewDefaulter() admission.Interface {
+	scheme := runtime.NewScheme()
+	codecFactory := runtimeserializer.NewCodecFactory(scheme)
+	utilruntime.Must(securityv1.Install(scheme))
+
+	return &defaultSCC{
+		Handler:      admission.NewHandler(admission.Create, admission.Update),
+		scheme:       scheme,
+		codecFactory: codecFactory,
+	}
+}
+
+// Admit defaults an SCC by decoding the incoming unstructured object into the
+// external securityv1 type, applying the manual defaults, and encoding the
+// result back to unstructured.
+func (a *defaultSCC) Admit(ctx context.Context, attributes admission.Attributes, o admission.ObjectInterfaces) error {
+	if a.shouldIgnore(attributes) {
+		return nil
+	}
+
+	unstructuredOrig, ok := attributes.GetObject().(*unstructured.Unstructured)
+	if !ok {
+		return nil
+	}
+	buf := &bytes.Buffer{}
+	if err := unstructured.UnstructuredJSONScheme.Encode(unstructuredOrig, buf); err != nil {
+		return err
+	}
+
+	uncastObj, err := runtime.Decode(a.codecFactory.UniversalDeserializer(), buf.Bytes())
+	if err != nil {
+		return err
+	}
+
+	outSCCExternal := uncastObj.(*securityv1.SecurityContextConstraints)
+	SetDefaults_SCC(outSCCExternal)
+	defaultedBytes, err := runtime.Encode(a.codecFactory.LegacyCodec(securityv1.GroupVersion), outSCCExternal)
+	if err != nil {
+		return err
+	}
+	outUnstructured := &unstructured.Unstructured{}
+	if _, _, err := unstructured.UnstructuredJSONScheme.Decode(defaultedBytes, nil, outUnstructured); err != nil {
+		return err
+	}
+
+	unstructuredOrig.Object = outUnstructured.Object
+
+	return nil
+}
+
+func (a *defaultSCC) shouldIgnore(attributes admission.Attributes) bool {
+	if attributes.GetResource().GroupResource() != (schema.GroupResource{Group: "security.openshift.io", Resource: "securitycontextconstraints"}) {
+		return true
+	}
+	// if a subresource is specified, skip it
+	if len(attributes.GetSubresource()) > 0 {
+		return true
+	}
+
+	return false
+}
diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc_test.go
new file mode 100644
index 0000000000000..16c6d56af2e2f
--- /dev/null
+++ 
b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc_test.go @@ -0,0 +1,274 @@ +package securitycontextconstraints + +import ( + "bytes" + "context" + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apiserver/pkg/admission" + + securityv1 "github.com/openshift/api/security/v1" + sccutil "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util" +) + +func TestDefaultingHappens(t *testing.T) { + inputSCC := `{ + "allowHostDirVolumePlugin": true, + "allowHostNetwork": true, + "allowHostPID": true, + "allowHostPorts": true, + "apiVersion": "security.openshift.io/v1", + "kind": "SecurityContextConstraints", + "metadata": { + "annotations": { + "kubernetes.io/description": "node-exporter scc is used for the Prometheus node exporter" + }, + "name": "node-exporter" + }, + "readOnlyRootFilesystem": false, + "runAsUser": { + "type": "RunAsAny" + }, + "seLinuxContext": { + "type": "RunAsAny" + }, + "users": [] +}` + + inputUnstructured := &unstructured.Unstructured{} + _, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(inputSCC), nil, inputUnstructured) + if err != nil { + t.Fatal(err) + } + + attributes := admission.NewAttributesRecord(inputUnstructured, nil, schema.GroupVersionKind{}, "", "", schema.GroupVersionResource{Group: "security.openshift.io", Resource: "securitycontextconstraints"}, "", admission.Create, nil, false, nil) + defaulter := NewDefaulter() + if err := defaulter.(*defaultSCC).Admit(context.TODO(), attributes, nil); err != nil { + t.Fatal(err) + } + + buf := &bytes.Buffer{} + if err := unstructured.UnstructuredJSONScheme.Encode(inputUnstructured, buf); err != nil { + t.Fatal(err) + } + + expectedSCC := `{ + "allowHostDirVolumePlugin": true, + "allowHostIPC": false, + "allowHostNetwork": true, + "allowHostPID": true, + "allowHostPorts": true, + "allowPrivilegeEscalation": true, + "allowPrivilegedContainer": false, + "allowedCapabilities": null, + "apiVersion": "security.openshift.io/v1", + "defaultAddCapabilities": null, + "fsGroup": { + "type": "RunAsAny" + }, + "groups": [], + "kind": "SecurityContextConstraints", + "metadata": { + "annotations": { + "kubernetes.io/description": "node-exporter scc is used for the Prometheus node exporter" + }, + "name": "node-exporter", + "creationTimestamp":null + }, + "priority": null, + "readOnlyRootFilesystem": false, + "requiredDropCapabilities": null, + "runAsUser": { + "type": "RunAsAny" + }, + "seLinuxContext": { + "type": "RunAsAny" + }, + "supplementalGroups": { + "type": "RunAsAny" + }, + "users": [], + "volumes": [ + "*" + ] +}` + expectedUnstructured := &unstructured.Unstructured{} + if _, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(expectedSCC), nil, expectedUnstructured); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(expectedUnstructured.Object, inputUnstructured.Object) { + t.Fatal(diff.ObjectDiff(expectedUnstructured.Object, inputUnstructured.Object)) + } +} + +func TestDefaultSecurityContextConstraints(t *testing.T) { + tests := map[string]struct { + scc *securityv1.SecurityContextConstraints + expectedFSGroup securityv1.FSGroupStrategyType + expectedSupGroup securityv1.SupplementalGroupsStrategyType + }{ + "shouldn't default": { + scc: &securityv1.SecurityContextConstraints{ + FSGroup: securityv1.FSGroupStrategyOptions{ + Type: securityv1.FSGroupStrategyMustRunAs, + }, + SupplementalGroups: 
securityv1.SupplementalGroupsStrategyOptions{ + Type: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyMustRunAs, + expectedSupGroup: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + "default fsgroup runAsAny": { + scc: &securityv1.SecurityContextConstraints{ + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyRunAsAny, + }, + SupplementalGroups: securityv1.SupplementalGroupsStrategyOptions{ + Type: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyRunAsAny, + expectedSupGroup: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + "default sup group runAsAny": { + scc: &securityv1.SecurityContextConstraints{ + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyRunAsAny, + }, + FSGroup: securityv1.FSGroupStrategyOptions{ + Type: securityv1.FSGroupStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyMustRunAs, + expectedSupGroup: securityv1.SupplementalGroupsStrategyRunAsAny, + }, + "default fsgroup runAsAny with mustRunAs UID strategy": { + scc: &securityv1.SecurityContextConstraints{ + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyMustRunAsRange, + }, + SupplementalGroups: securityv1.SupplementalGroupsStrategyOptions{ + Type: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyRunAsAny, + expectedSupGroup: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + "default sup group runAsAny with mustRunAs UID strategy": { + scc: &securityv1.SecurityContextConstraints{ + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyMustRunAsRange, + }, + FSGroup: securityv1.FSGroupStrategyOptions{ + Type: securityv1.FSGroupStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyMustRunAs, + expectedSupGroup: securityv1.SupplementalGroupsStrategyRunAsAny, + }, + } + for k, v := range tests { + SetDefaults_SCC(v.scc) + if v.scc.FSGroup.Type != v.expectedFSGroup { + t.Errorf("%s has invalid fsgroup. Expected: %v got: %v", k, v.expectedFSGroup, v.scc.FSGroup.Type) + } + if v.scc.SupplementalGroups.Type != v.expectedSupGroup { + t.Errorf("%s has invalid supplemental group. 
Expected: %v got: %v", k, v.expectedSupGroup, v.scc.SupplementalGroups.Type) + } + } +} + +func TestDefaultSCCVolumes(t *testing.T) { + tests := map[string]struct { + scc *securityv1.SecurityContextConstraints + expectedVolumes []securityv1.FSType + expectedHostDir bool + }{ + // this expects the volumes to default to all for an empty volume slice + // but since the host dir setting is false it should be all - host dir + "old client - default allow* fields, no volumes slice": { + scc: &securityv1.SecurityContextConstraints{}, + expectedVolumes: StringSetToFSType(sccutil.GetAllFSTypesExcept(string(securityv1.FSTypeHostPath))), + expectedHostDir: false, + }, + // this expects the volumes to default to all for an empty volume slice + "old client - set allowHostDir true fields, no volumes slice": { + scc: &securityv1.SecurityContextConstraints{ + AllowHostDirVolumePlugin: true, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeAll}, + expectedHostDir: true, + }, + "new client - allow* fields set with matching volume slice": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath}, + AllowHostDirVolumePlugin: true, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath}, + expectedHostDir: true, + }, + "new client - allow* fields set with mismatch host dir volume slice": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath}, + AllowHostDirVolumePlugin: false, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeEmptyDir}, + expectedHostDir: false, + }, + "new client - allow* fields set with mismatch FSTypeAll volume slice": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeAll}, + AllowHostDirVolumePlugin: false, + }, + expectedVolumes: StringSetToFSType(sccutil.GetAllFSTypesExcept(string(securityv1.FSTypeHostPath))), + expectedHostDir: false, + }, + "new client - allow* fields unset with volume slice": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath}, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeEmptyDir}, + expectedHostDir: false, + }, + "new client - extra volume params retained": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath, securityv1.FSTypeGitRepo}, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeGitRepo}, + expectedHostDir: false, + }, + "new client - empty volume slice, host dir true": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{}, + AllowHostDirVolumePlugin: true, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeHostPath}, + expectedHostDir: true, + }, + "new client - empty volume slice, host dir false": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{}, + AllowHostDirVolumePlugin: false, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeNone}, + expectedHostDir: false, + }, + } + for k, v := range tests { + SetDefaults_SCC(v.scc) + + if !reflect.DeepEqual(v.scc.Volumes, v.expectedVolumes) { + t.Errorf("%s has invalid volumes. Expected: %v got: %v", k, v.expectedVolumes, v.scc.Volumes) + } + + if v.scc.AllowHostDirVolumePlugin != v.expectedHostDir { + t.Errorf("%s has invalid host dir. 
Expected: %v got: %v", k, v.expectedHostDir, v.scc.AllowHostDirVolumePlugin) + } + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaults.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaults.go new file mode 100644 index 0000000000000..e6e4b5ff44fc7 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaults.go @@ -0,0 +1,100 @@ +package securitycontextconstraints + +import ( + "k8s.io/apimachinery/pkg/util/sets" + + securityv1 "github.com/openshift/api/security/v1" + sccutil "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util" +) + +// Default SCCs for new fields. FSGroup and SupplementalGroups are +// set to the RunAsAny strategy if they are unset on the scc. +func SetDefaults_SCC(scc *securityv1.SecurityContextConstraints) { + if len(scc.FSGroup.Type) == 0 { + scc.FSGroup.Type = securityv1.FSGroupStrategyRunAsAny + } + if len(scc.SupplementalGroups.Type) == 0 { + scc.SupplementalGroups.Type = securityv1.SupplementalGroupsStrategyRunAsAny + } + + if scc.Users == nil { + scc.Users = []string{} + } + if scc.Groups == nil { + scc.Groups = []string{} + } + + var defaultAllowedVolumes sets.String + switch { + case scc.Volumes == nil: + // assume a nil volume slice is allowing everything for backwards compatibility + defaultAllowedVolumes = sets.NewString(string(securityv1.FSTypeAll)) + + case len(scc.Volumes) == 0 && scc.AllowHostDirVolumePlugin: + // an empty volume slice means "allow no volumes", but the boolean fields will always take precedence. + defaultAllowedVolumes = sets.NewString(string(securityv1.FSTypeHostPath)) + + case len(scc.Volumes) == 0 && !scc.AllowHostDirVolumePlugin: + // an empty volume slice means "allow no volumes", but cannot be persisted in protobuf. + // convert this to volumes:["none"] + defaultAllowedVolumes = sets.NewString(string(securityv1.FSTypeNone)) + + default: + // defaults the volume slice of the SCC. + // In order to support old clients the boolean fields will always take precedence. + defaultAllowedVolumes = fsTypeToStringSet(scc.Volumes) + } + + if scc.AllowHostDirVolumePlugin { + // if already allowing all then there is no reason to add + if !defaultAllowedVolumes.Has(string(securityv1.FSTypeAll)) { + defaultAllowedVolumes.Insert(string(securityv1.FSTypeHostPath)) + } + } else { + // we should only default all volumes if the SCC came in with FSTypeAll or we defaulted it + // otherwise we should only change the volumes slice to ensure that it does not conflict with + // the AllowHostDirVolumePlugin setting + shouldDefaultAllVolumes := defaultAllowedVolumes.Has(string(securityv1.FSTypeAll)) + + // remove anything from volumes that conflicts with AllowHostDirVolumePlugin = false + defaultAllowedVolumes.Delete(string(securityv1.FSTypeAll)) + defaultAllowedVolumes.Delete(string(securityv1.FSTypeHostPath)) + + if shouldDefaultAllVolumes { + allVolumes := sccutil.GetAllFSTypesExcept(string(securityv1.FSTypeHostPath)) + defaultAllowedVolumes.Insert(allVolumes.List()...) + } + } + + scc.Volumes = StringSetToFSType(defaultAllowedVolumes) + + // Constraints that do not include this field must remain as permissive as + // they were prior to the introduction of this field. 
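+ // For illustration (a sketch, not normative): calling SetDefaults_SCC on an all-zero + // &securityv1.SecurityContextConstraints{} leaves FSGroup and SupplementalGroups as RunAsAny, + // sets Volumes to every FSType except "hostPath" (a nil Volumes slice is read as "*", which is + // then re-expanded because AllowHostDirVolumePlugin defaults to false), and defaults + // AllowPrivilegeEscalation to true just below.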
+ if scc.AllowPrivilegeEscalation == nil { + t := true + scc.AllowPrivilegeEscalation = &t + } + +} + +func StringSetToFSType(set sets.String) []securityv1.FSType { + if set == nil { + return nil + } + volumes := []securityv1.FSType{} + for _, v := range set.List() { + volumes = append(volumes, securityv1.FSType(v)) + } + return volumes +} + +func fsTypeToStringSet(volumes []securityv1.FSType) sets.String { + if volumes == nil { + return nil + } + set := sets.NewString() + for _, v := range volumes { + set.Insert(string(v)) + } + return set +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validate_scc.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validate_scc.go new file mode 100644 index 0000000000000..7928686b7ac13 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validate_scc.go @@ -0,0 +1,80 @@ +package securitycontextconstraints + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + securityv1 "github.com/openshift/api/security/v1" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + sccvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation" +) + +const PluginName = "security.openshift.io/ValidateSecurityContextConstraints" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + {Group: securityv1.GroupName, Resource: "securitycontextconstraints"}: true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + securityv1.GroupVersion.WithKind("SecurityContextConstraints"): securityContextConstraintsV1{}, + }) + }) +} + +func toSecurityContextConstraints(uncastObj runtime.Object) (*securityv1.SecurityContextConstraints, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + obj, ok := uncastObj.(*securityv1.SecurityContextConstraints) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"SecurityContextConstraints"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{securityv1.GroupVersion.String()}), + } + } + + return obj, nil +} + +type securityContextConstraintsV1 struct { +} + +func (securityContextConstraintsV1) ValidateCreate(_ context.Context, obj runtime.Object) field.ErrorList { + securityContextConstraintsObj, errs := toSecurityContextConstraints(obj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, sccvalidation.ValidateSecurityContextConstraints(securityContextConstraintsObj)...) + + return errs +} + +func (securityContextConstraintsV1) ValidateUpdate(_ context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + securityContextConstraintsObj, errs := toSecurityContextConstraints(obj) + if len(errs) > 0 { + return errs + } + securityContextConstraintsOldObj, errs := toSecurityContextConstraints(oldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, sccvalidation.ValidateSecurityContextConstraintsUpdate(securityContextConstraintsObj, securityContextConstraintsOldObj)...) 
+ + return errs +} + +func (c securityContextConstraintsV1) ValidateStatusUpdate(ctx context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + return c.ValidateUpdate(ctx, obj, oldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation.go new file mode 100644 index 0000000000000..493339867b8c5 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation.go @@ -0,0 +1,275 @@ +package validation + +import ( + "fmt" + "regexp" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/validation" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + kapivalidation "k8s.io/kubernetes/pkg/apis/core/validation" + + securityv1 "github.com/openshift/api/security/v1" +) + +// ValidateSecurityContextConstraintsName can be used to check whether the given +// security context constraint name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateSecurityContextConstraintsName = apimachineryvalidation.NameIsDNSSubdomain + +func ValidateSecurityContextConstraints(scc *securityv1.SecurityContextConstraints) field.ErrorList { + allErrs := validation.ValidateObjectMeta(&scc.ObjectMeta, false, ValidateSecurityContextConstraintsName, field.NewPath("metadata")) + + if scc.Priority != nil { + if *scc.Priority < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("priority"), *scc.Priority, "priority cannot be negative")) + } + } + + // ensure the user strategy has a valid type + runAsUserPath := field.NewPath("runAsUser") + switch scc.RunAsUser.Type { + case securityv1.RunAsUserStrategyMustRunAs, securityv1.RunAsUserStrategyMustRunAsNonRoot, securityv1.RunAsUserStrategyRunAsAny, securityv1.RunAsUserStrategyMustRunAsRange: + //good types + default: + msg := fmt.Sprintf("invalid strategy type. Valid values are %s, %s, %s, %s", securityv1.RunAsUserStrategyMustRunAs, securityv1.RunAsUserStrategyMustRunAsNonRoot, securityv1.RunAsUserStrategyMustRunAsRange, securityv1.RunAsUserStrategyRunAsAny) + allErrs = append(allErrs, field.Invalid(runAsUserPath.Child("type"), scc.RunAsUser.Type, msg)) + } + + // if specified, uid cannot be negative + if scc.RunAsUser.UID != nil { + if *scc.RunAsUser.UID < 0 { + allErrs = append(allErrs, field.Invalid(runAsUserPath.Child("uid"), *scc.RunAsUser.UID, "uid cannot be negative")) + } + } + + // ensure the selinux strategy has a valid type + seLinuxContextPath := field.NewPath("seLinuxContext") + switch scc.SELinuxContext.Type { + case securityv1.SELinuxStrategyMustRunAs, securityv1.SELinuxStrategyRunAsAny: + //good types + default: + msg := fmt.Sprintf("invalid strategy type. 
Valid values are %s, %s", securityv1.SELinuxStrategyMustRunAs, securityv1.SELinuxStrategyRunAsAny) + allErrs = append(allErrs, field.Invalid(seLinuxContextPath.Child("type"), scc.SELinuxContext.Type, msg)) + } + + // ensure the fsgroup strategy has a valid type + if scc.FSGroup.Type != securityv1.FSGroupStrategyMustRunAs && scc.FSGroup.Type != securityv1.FSGroupStrategyRunAsAny { + allErrs = append(allErrs, field.NotSupported(field.NewPath("fsGroup", "type"), scc.FSGroup.Type, + []string{string(securityv1.FSGroupStrategyMustRunAs), string(securityv1.FSGroupStrategyRunAsAny)})) + } + allErrs = append(allErrs, validateIDRanges(scc.FSGroup.Ranges, field.NewPath("fsGroup"))...) + + if scc.SupplementalGroups.Type != securityv1.SupplementalGroupsStrategyMustRunAs && + scc.SupplementalGroups.Type != securityv1.SupplementalGroupsStrategyRunAsAny { + allErrs = append(allErrs, field.NotSupported(field.NewPath("supplementalGroups", "type"), scc.SupplementalGroups.Type, + []string{string(securityv1.SupplementalGroupsStrategyMustRunAs), string(securityv1.SupplementalGroupsStrategyRunAsAny)})) + } + allErrs = append(allErrs, validateIDRanges(scc.SupplementalGroups.Ranges, field.NewPath("supplementalGroups"))...) + + // validate capabilities + allErrs = append(allErrs, validateSCCCapsAgainstDrops(scc.RequiredDropCapabilities, scc.DefaultAddCapabilities, field.NewPath("defaultAddCapabilities"))...) + allErrs = append(allErrs, validateSCCCapsAgainstDrops(scc.RequiredDropCapabilities, scc.AllowedCapabilities, field.NewPath("allowedCapabilities"))...) + + if hasCap(securityv1.AllowAllCapabilities, scc.AllowedCapabilities) && len(scc.RequiredDropCapabilities) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("requiredDropCapabilities"), scc.RequiredDropCapabilities, + "required capabilities must be empty when all capabilities are allowed by a wildcard")) + } + + allErrs = append(allErrs, validateSCCDefaultAllowPrivilegeEscalation(field.NewPath("defaultAllowPrivilegeEscalation"), scc.DefaultAllowPrivilegeEscalation, scc.AllowPrivilegeEscalation)...) + + allowsFlexVolumes := false + hasNoneVolume := false + + if len(scc.Volumes) > 0 { + for _, fsType := range scc.Volumes { + if fsType == securityv1.FSTypeNone { + hasNoneVolume = true + + } else if fsType == securityv1.FSTypeFlexVolume || fsType == securityv1.FSTypeAll { + allowsFlexVolumes = true + } + } + } + + if hasNoneVolume && len(scc.Volumes) > 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("volumes"), scc.Volumes, + "if 'none' is specified, no other values are allowed")) + } + + if len(scc.AllowedFlexVolumes) > 0 { + if allowsFlexVolumes { + for idx, allowedFlexVolume := range scc.AllowedFlexVolumes { + if len(allowedFlexVolume.Driver) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("allowedFlexVolumes").Index(idx).Child("driver"), + "must specify a driver")) + } + } + } else { + allErrs = append(allErrs, field.Invalid(field.NewPath("allowedFlexVolumes"), scc.AllowedFlexVolumes, + "volumes does not include 'flexVolume' or '*', so no flex volumes are allowed")) + } + } + + allowedUnsafeSysctlsPath := field.NewPath("allowedUnsafeSysctls") + forbiddenSysctlsPath := field.NewPath("forbiddenSysctls") + allErrs = append(allErrs, validateSCCSysctls(allowedUnsafeSysctlsPath, scc.AllowedUnsafeSysctls)...) + allErrs = append(allErrs, validateSCCSysctls(forbiddenSysctlsPath, scc.ForbiddenSysctls)...) 
+ allErrs = append(allErrs, validatePodSecurityPolicySysctlListsDoNotOverlap(allowedUnsafeSysctlsPath, forbiddenSysctlsPath, scc.AllowedUnsafeSysctls, scc.ForbiddenSysctls)...) + + return allErrs +} + +const sysctlPatternSegmentFmt string = "([a-z0-9][-_a-z0-9]*)?[a-z0-9*]" +const sysctlPatternFmt string = "(" + kapivalidation.SysctlSegmentFmt + "\\.)*" + sysctlPatternSegmentFmt + +var sysctlPatternRegexp = regexp.MustCompile("^" + sysctlPatternFmt + "$") + +func IsValidSysctlPattern(name string) bool { + if len(name) > kapivalidation.SysctlMaxLength { + return false + } + return sysctlPatternRegexp.MatchString(name) +} + +// validatePodSecurityPolicySysctlListsDoNotOverlap validates that the values in the allowedUnsafeSysctls and forbiddenSysctls fields do not overlap. +func validatePodSecurityPolicySysctlListsDoNotOverlap(allowedSysctlsFldPath, forbiddenSysctlsFldPath *field.Path, allowedUnsafeSysctls, forbiddenSysctls []string) field.ErrorList { + allErrs := field.ErrorList{} + for i, allowedSysctl := range allowedUnsafeSysctls { + isAllowedSysctlPattern := false + allowedSysctlPrefix := "" + if strings.HasSuffix(allowedSysctl, "*") { + isAllowedSysctlPattern = true + allowedSysctlPrefix = strings.TrimSuffix(allowedSysctl, "*") + } + for j, forbiddenSysctl := range forbiddenSysctls { + isForbiddenSysctlPattern := false + forbiddenSysctlPrefix := "" + if strings.HasSuffix(forbiddenSysctl, "*") { + isForbiddenSysctlPattern = true + forbiddenSysctlPrefix = strings.TrimSuffix(forbiddenSysctl, "*") + } + switch { + case isAllowedSysctlPattern && isForbiddenSysctlPattern: + if strings.HasPrefix(allowedSysctlPrefix, forbiddenSysctlPrefix) { + allErrs = append(allErrs, field.Invalid(allowedSysctlsFldPath.Index(i), allowedUnsafeSysctls[i], fmt.Sprintf("sysctl overlaps with %v", forbiddenSysctl))) + } else if strings.HasPrefix(forbiddenSysctlPrefix, allowedSysctlPrefix) { + allErrs = append(allErrs, field.Invalid(forbiddenSysctlsFldPath.Index(j), forbiddenSysctls[j], fmt.Sprintf("sysctl overlaps with %v", allowedSysctl))) + } + case isAllowedSysctlPattern: + if strings.HasPrefix(forbiddenSysctl, allowedSysctlPrefix) { + allErrs = append(allErrs, field.Invalid(forbiddenSysctlsFldPath.Index(j), forbiddenSysctls[j], fmt.Sprintf("sysctl overlaps with %v", allowedSysctl))) + } + case isForbiddenSysctlPattern: + if strings.HasPrefix(allowedSysctl, forbiddenSysctlPrefix) { + allErrs = append(allErrs, field.Invalid(allowedSysctlsFldPath.Index(i), allowedUnsafeSysctls[i], fmt.Sprintf("sysctl overlaps with %v", forbiddenSysctl))) + } + default: + if allowedSysctl == forbiddenSysctl { + allErrs = append(allErrs, field.Invalid(allowedSysctlsFldPath.Index(i), allowedUnsafeSysctls[i], fmt.Sprintf("sysctl overlaps with %v", forbiddenSysctl))) + } + } + } + } + return allErrs +}
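+ +// An illustrative sketch of the pattern and overlap rules above (not part of the validation logic): +// IsValidSysctlPattern("kernel.shmmax") returns true (a plain sysctl name), +// IsValidSysctlPattern("kernel.*") returns true ('*' may end a pattern), and +// IsValidSysctlPattern("a.*.b") returns false ('*' is only valid as a trailing wildcard). +// With allowedUnsafeSysctls=["kernel.shmmax"] and forbiddenSysctls=["kernel.*"], the allowed entry +// shares the forbidden pattern's prefix, so validatePodSecurityPolicySysctlListsDoNotOverlap +// reports an overlap error against the allowed entry.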
+ +// validateSCCSysctls validates a list of sysctl patterns (used for both the allowedUnsafeSysctls and forbiddenSysctls fields) of a SecurityContextConstraints. +func validateSCCSysctls(fldPath *field.Path, sysctls []string) field.ErrorList { + allErrs := field.ErrorList{} + + if len(sysctls) == 0 { + return allErrs + } + + coversAll := false + for i, s := range sysctls { + if len(s) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), sysctls[i], "empty sysctl not allowed")) + } else if !IsValidSysctlPattern(s) { + allErrs = append( + allErrs, + field.Invalid(fldPath.Index(i), sysctls[i], fmt.Sprintf("must have at most %d characters and match regex %s", + kapivalidation.SysctlMaxLength, + sysctlPatternFmt, + )), + ) + } else if s[0] == '*' { + coversAll = true + } + } + + if coversAll && len(sysctls) > 1 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("items"), "if '*' is present, must not specify other sysctls")) + } + + return allErrs +} + +// validateSCCCapsAgainstDrops ensures an allowed cap is not listed in the required drops. +func validateSCCCapsAgainstDrops(requiredDrops []corev1.Capability, capsToCheck []corev1.Capability, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if requiredDrops == nil { + return allErrs + } + for _, cap := range capsToCheck { + if hasCap(cap, requiredDrops) { + allErrs = append(allErrs, field.Invalid(fldPath, cap, + fmt.Sprintf("capability is listed in %s and requiredDropCapabilities", fldPath.String()))) + } + } + return allErrs +} + +// validateSCCDefaultAllowPrivilegeEscalation validates the DefaultAllowPrivilegeEscalation field against the AllowPrivilegeEscalation field of a SecurityContextConstraints. +func validateSCCDefaultAllowPrivilegeEscalation(fldPath *field.Path, defaultAllowPrivilegeEscalation, allowPrivilegeEscalation *bool) field.ErrorList { + allErrs := field.ErrorList{} + if defaultAllowPrivilegeEscalation != nil && allowPrivilegeEscalation != nil && *defaultAllowPrivilegeEscalation && !*allowPrivilegeEscalation { + allErrs = append(allErrs, field.Invalid(fldPath, defaultAllowPrivilegeEscalation, "Cannot set DefaultAllowPrivilegeEscalation to true without also setting AllowPrivilegeEscalation to true")) + } + + return allErrs +} + +// hasCap checks for needle in haystack. +func hasCap(needle corev1.Capability, haystack []corev1.Capability) bool { + for _, c := range haystack { + if needle == c { + return true + } + } + return false +} + +// validateIDRanges ensures the range is valid. +func validateIDRanges(rng []securityv1.IDRange, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for i, r := range rng { + // if 0 <= Min <= Max then we do not need to validate max. It is always greater than or + // equal to 0 and Min. + minPath := fldPath.Child("ranges").Index(i).Child("min") + maxPath := fldPath.Child("ranges").Index(i).Child("max") + + if r.Min < 0 { + allErrs = append(allErrs, field.Invalid(minPath, r.Min, "min cannot be negative")) + } + if r.Max < 0 { + allErrs = append(allErrs, field.Invalid(maxPath, r.Max, "max cannot be negative")) + } + if r.Min > r.Max { + allErrs = append(allErrs, field.Invalid(minPath, r, "min cannot be greater than max")) + } + } + + return allErrs +} + +func ValidateSecurityContextConstraintsUpdate(newScc, oldScc *securityv1.SecurityContextConstraints) field.ErrorList { + allErrs := validation.ValidateObjectMetaUpdate(&newScc.ObjectMeta, &oldScc.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateSecurityContextConstraints(newScc)...)
+ return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation_test.go new file mode 100644 index 0000000000000..01c4d472c0a22 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation_test.go @@ -0,0 +1,343 @@ +package validation + +import ( + "fmt" + "testing" + + kcorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + securityv1 "github.com/openshift/api/security/v1" +) + +func TestValidateSecurityContextConstraints(t *testing.T) { + var invalidUID int64 = -1 + var invalidPriority int32 = -1 + var validPriority int32 = 1 + yes := true + no := false + + validSCC := func() *securityv1.SecurityContextConstraints { + return &securityv1.SecurityContextConstraints{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + SELinuxContext: securityv1.SELinuxContextStrategyOptions{ + Type: securityv1.SELinuxStrategyRunAsAny, + }, + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyRunAsAny, + }, + FSGroup: securityv1.FSGroupStrategyOptions{ + Type: securityv1.FSGroupStrategyRunAsAny, + }, + SupplementalGroups: securityv1.SupplementalGroupsStrategyOptions{ + Type: securityv1.SupplementalGroupsStrategyRunAsAny, + }, + Priority: &validPriority, + } + } + + noUserOptions := validSCC() + noUserOptions.RunAsUser.Type = "" + + noSELinuxOptions := validSCC() + noSELinuxOptions.SELinuxContext.Type = "" + + invalidUserStratType := validSCC() + invalidUserStratType.RunAsUser.Type = "invalid" + + invalidSELinuxStratType := validSCC() + invalidSELinuxStratType.SELinuxContext.Type = "invalid" + + invalidUIDSCC := validSCC() + invalidUIDSCC.RunAsUser.Type = securityv1.RunAsUserStrategyMustRunAs + invalidUIDSCC.RunAsUser.UID = &invalidUID + + missingObjectMetaName := validSCC() + missingObjectMetaName.ObjectMeta.Name = "" + + noFSGroupOptions := validSCC() + noFSGroupOptions.FSGroup.Type = "" + + invalidFSGroupStratType := validSCC() + invalidFSGroupStratType.FSGroup.Type = "invalid" + + noSupplementalGroupsOptions := validSCC() + noSupplementalGroupsOptions.SupplementalGroups.Type = "" + + invalidSupGroupStratType := validSCC() + invalidSupGroupStratType.SupplementalGroups.Type = "invalid" + + invalidRangeMinGreaterThanMax := validSCC() + invalidRangeMinGreaterThanMax.FSGroup.Ranges = []securityv1.IDRange{ + {Min: 2, Max: 1}, + } + + invalidRangeNegativeMin := validSCC() + invalidRangeNegativeMin.FSGroup.Ranges = []securityv1.IDRange{ + {Min: -1, Max: 10}, + } + + invalidRangeNegativeMax := validSCC() + invalidRangeNegativeMax.FSGroup.Ranges = []securityv1.IDRange{ + {Min: 1, Max: -10}, + } + + negativePriority := validSCC() + negativePriority.Priority = &invalidPriority + + requiredCapAddAndDrop := validSCC() + requiredCapAddAndDrop.DefaultAddCapabilities = []kcorev1.Capability{"foo"} + requiredCapAddAndDrop.RequiredDropCapabilities = []kcorev1.Capability{"foo"} + + allowedCapListedInRequiredDrop := validSCC() + allowedCapListedInRequiredDrop.RequiredDropCapabilities = []kcorev1.Capability{"foo"} + allowedCapListedInRequiredDrop.AllowedCapabilities = []kcorev1.Capability{"foo"} + + wildcardAllowedCapAndRequiredDrop := validSCC() + wildcardAllowedCapAndRequiredDrop.RequiredDropCapabilities = []kcorev1.Capability{"foo"} + 
wildcardAllowedCapAndRequiredDrop.AllowedCapabilities = []kcorev1.Capability{securityv1.AllowAllCapabilities} + + emptyFlexDriver := validSCC() + emptyFlexDriver.Volumes = []securityv1.FSType{securityv1.FSTypeFlexVolume} + emptyFlexDriver.AllowedFlexVolumes = []securityv1.AllowedFlexVolume{{}} + + nonEmptyFlexVolumes := validSCC() + nonEmptyFlexVolumes.AllowedFlexVolumes = []securityv1.AllowedFlexVolume{{Driver: "example/driver"}} + + invalidDefaultAllowPrivilegeEscalation := validSCC() + invalidDefaultAllowPrivilegeEscalation.DefaultAllowPrivilegeEscalation = &yes + invalidDefaultAllowPrivilegeEscalation.AllowPrivilegeEscalation = &no + + invalidAllowedUnsafeSysctlPattern := validSCC() + invalidAllowedUnsafeSysctlPattern.AllowedUnsafeSysctls = []string{"a.*.b"} + + invalidForbiddenSysctlPattern := validSCC() + invalidForbiddenSysctlPattern.ForbiddenSysctls = []string{"a.*.b"} + + invalidOverlappingSysctls := validSCC() + invalidOverlappingSysctls.ForbiddenSysctls = []string{"kernel.*", "net.ipv4.ip_local_port_range"} + invalidOverlappingSysctls.AllowedUnsafeSysctls = []string{"kernel.shmmax", "net.ipv4.ip_local_port_range"} + + invalidDuplicatedSysctls := validSCC() + invalidDuplicatedSysctls.ForbiddenSysctls = []string{"net.ipv4.ip_local_port_range"} + invalidDuplicatedSysctls.AllowedUnsafeSysctls = []string{"net.ipv4.ip_local_port_range"} + + errorCases := map[string]struct { + scc *securityv1.SecurityContextConstraints + errorType field.ErrorType + errorDetail string + }{ + "no user options": { + scc: noUserOptions, + errorType: field.ErrorTypeInvalid, + errorDetail: "invalid strategy type. Valid values are MustRunAs, MustRunAsNonRoot, MustRunAsRange, RunAsAny", + }, + "no selinux options": { + scc: noSELinuxOptions, + errorType: field.ErrorTypeInvalid, + errorDetail: "invalid strategy type. Valid values are MustRunAs, RunAsAny", + }, + "no fsgroup options": { + scc: noFSGroupOptions, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: \"MustRunAs\", \"RunAsAny\"", + }, + "no sup group options": { + scc: noSupplementalGroupsOptions, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: \"MustRunAs\", \"RunAsAny\"", + }, + "invalid user strategy type": { + scc: invalidUserStratType, + errorType: field.ErrorTypeInvalid, + errorDetail: "invalid strategy type. Valid values are MustRunAs, MustRunAsNonRoot, MustRunAsRange, RunAsAny", + }, + "invalid selinux strategy type": { + scc: invalidSELinuxStratType, + errorType: field.ErrorTypeInvalid, + errorDetail: "invalid strategy type. 
Valid values are MustRunAs, RunAsAny", + }, + "invalid sup group strategy type": { + scc: invalidSupGroupStratType, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: \"MustRunAs\", \"RunAsAny\"", + }, + "invalid fs group strategy type": { + scc: invalidFSGroupStratType, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: \"MustRunAs\", \"RunAsAny\"", + }, + "invalid uid": { + scc: invalidUIDSCC, + errorType: field.ErrorTypeInvalid, + errorDetail: "uid cannot be negative", + }, + "missing object meta name": { + scc: missingObjectMetaName, + errorType: field.ErrorTypeRequired, + errorDetail: "name or generateName is required", + }, + "invalid range min greater than max": { + scc: invalidRangeMinGreaterThanMax, + errorType: field.ErrorTypeInvalid, + errorDetail: "min cannot be greater than max", + }, + "invalid range negative min": { + scc: invalidRangeNegativeMin, + errorType: field.ErrorTypeInvalid, + errorDetail: "min cannot be negative", + }, + "invalid range negative max": { + scc: invalidRangeNegativeMax, + errorType: field.ErrorTypeInvalid, + errorDetail: "max cannot be negative", + }, + "negative priority": { + scc: negativePriority, + errorType: field.ErrorTypeInvalid, + errorDetail: "priority cannot be negative", + }, + "invalid required caps": { + scc: requiredCapAddAndDrop, + errorType: field.ErrorTypeInvalid, + errorDetail: "capability is listed in defaultAddCapabilities and requiredDropCapabilities", + }, + "allowed cap listed in required drops": { + scc: allowedCapListedInRequiredDrop, + errorType: field.ErrorTypeInvalid, + errorDetail: "capability is listed in allowedCapabilities and requiredDropCapabilities", + }, + "all caps allowed by a wildcard and required drops is not empty": { + scc: wildcardAllowedCapAndRequiredDrop, + errorType: field.ErrorTypeInvalid, + errorDetail: "required capabilities must be empty when all capabilities are allowed by a wildcard", + }, + "empty flex volume driver": { + scc: emptyFlexDriver, + errorType: field.ErrorTypeRequired, + errorDetail: "must specify a driver", + }, + "non-empty allowed flex volumes": { + scc: nonEmptyFlexVolumes, + errorType: field.ErrorTypeInvalid, + errorDetail: "volumes does not include 'flexVolume' or '*', so no flex volumes are allowed", + }, + "invalid defaultAllowPrivilegeEscalation": { + scc: invalidDefaultAllowPrivilegeEscalation, + errorType: field.ErrorTypeInvalid, + errorDetail: "Cannot set DefaultAllowPrivilegeEscalation to true without also setting AllowPrivilegeEscalation to true", + }, + "invalid allowed unsafe sysctl pattern": { + scc: invalidAllowedUnsafeSysctlPattern, + errorType: field.ErrorTypeInvalid, + errorDetail: fmt.Sprintf("must have at most 253 characters and match regex %s", sysctlPatternFmt), + }, + "invalid forbidden sysctl pattern": { + scc: invalidForbiddenSysctlPattern, + errorType: field.ErrorTypeInvalid, + errorDetail: fmt.Sprintf("must have at most 253 characters and match regex %s", sysctlPatternFmt), + }, + "invalid overlapping sysctl pattern": { + scc: invalidOverlappingSysctls, + errorType: field.ErrorTypeInvalid, + errorDetail: fmt.Sprintf("sysctl overlaps with %s", invalidOverlappingSysctls.ForbiddenSysctls[0]), + }, + "invalid duplicated sysctls": { + scc: invalidDuplicatedSysctls, + errorType: field.ErrorTypeInvalid, + errorDetail: fmt.Sprintf("sysctl overlaps with %s", invalidDuplicatedSysctls.AllowedUnsafeSysctls[0]), + }, + } + + for k, v := range errorCases { + t.Run(k, func(t *testing.T) { + if errs := 
ValidateSecurityContextConstraints(v.scc); len(errs) == 0 || errs[0].Type != v.errorType || errs[0].Detail != v.errorDetail { + t.Errorf("Expected error type %q with detail %q, got %v", v.errorType, v.errorDetail, errs) + } + }) + } + + var validUID int64 = 1 + + mustRunAs := validSCC() + mustRunAs.FSGroup.Type = securityv1.FSGroupStrategyMustRunAs + mustRunAs.SupplementalGroups.Type = securityv1.SupplementalGroupsStrategyMustRunAs + mustRunAs.RunAsUser.Type = securityv1.RunAsUserStrategyMustRunAs + mustRunAs.RunAsUser.UID = &validUID + mustRunAs.SELinuxContext.Type = securityv1.SELinuxStrategyMustRunAs + + runAsNonRoot := validSCC() + runAsNonRoot.RunAsUser.Type = securityv1.RunAsUserStrategyMustRunAsNonRoot + + caseInsensitiveAddDrop := validSCC() + caseInsensitiveAddDrop.DefaultAddCapabilities = []kcorev1.Capability{"foo"} + caseInsensitiveAddDrop.RequiredDropCapabilities = []kcorev1.Capability{"FOO"} + + caseInsensitiveAllowedDrop := validSCC() + caseInsensitiveAllowedDrop.RequiredDropCapabilities = []kcorev1.Capability{"FOO"} + caseInsensitiveAllowedDrop.AllowedCapabilities = []kcorev1.Capability{"foo"} + + flexvolumeWhenFlexVolumesAllowed := validSCC() + flexvolumeWhenFlexVolumesAllowed.Volumes = []securityv1.FSType{securityv1.FSTypeFlexVolume} + flexvolumeWhenFlexVolumesAllowed.AllowedFlexVolumes = []securityv1.AllowedFlexVolume{ + {Driver: "example/driver1"}, + } + + flexvolumeWhenAllVolumesAllowed := validSCC() + flexvolumeWhenAllVolumesAllowed.Volumes = []securityv1.FSType{securityv1.FSTypeAll} + flexvolumeWhenAllVolumesAllowed.AllowedFlexVolumes = []securityv1.AllowedFlexVolume{ + {Driver: "example/driver2"}, + } + + validDefaultAllowPrivilegeEscalation := validSCC() + validDefaultAllowPrivilegeEscalation.DefaultAllowPrivilegeEscalation = &yes + validDefaultAllowPrivilegeEscalation.AllowPrivilegeEscalation = &yes + + withForbiddenSysctl := validSCC() + withForbiddenSysctl.ForbiddenSysctls = []string{"net.*"} + + withAllowedUnsafeSysctl := validSCC() + withAllowedUnsafeSysctl.AllowedUnsafeSysctls = []string{"net.ipv4.tcp_max_syn_backlog"} + + successCases := map[string]struct { + scc *securityv1.SecurityContextConstraints + }{ + "must run as": { + scc: mustRunAs, + }, + "run as any": { + scc: validSCC(), + }, + "run as non-root (user only)": { + scc: runAsNonRoot, + }, + "comparison for add -> drop is case sensitive": { + scc: caseInsensitiveAddDrop, + }, + "comparison for allowed -> drop is case sensitive": { + scc: caseInsensitiveAllowedDrop, + }, + "allow white-listed flexVolume when flex volumes are allowed": { + scc: flexvolumeWhenFlexVolumesAllowed, + }, + "allow white-listed flexVolume when all volumes are allowed": { + scc: flexvolumeWhenAllVolumesAllowed, + }, + "valid defaultAllowPrivilegeEscalation as true": { + scc: validDefaultAllowPrivilegeEscalation, + }, + "with network sysctls forbidden": { + scc: withForbiddenSysctl, + }, + "with unsafe net.ipv4.tcp_max_syn_backlog sysctl allowed": { + scc: withAllowedUnsafeSysctl, + }, + } + + for k, v := range successCases { + if errs := ValidateSecurityContextConstraints(v.scc); len(errs) != 0 { + t.Errorf("Expected success for %q, got %v", k, errs) + } + } +} diff --git a/openshift-kube-apiserver/admission/namespaceconditions/decorator.go b/openshift-kube-apiserver/admission/namespaceconditions/decorator.go new file mode 100644 index 0000000000000..02d7fa357cf71 --- /dev/null +++ b/openshift-kube-apiserver/admission/namespaceconditions/decorator.go @@ -0,0 +1,91 @@ +package namespaceconditions + +import ( + 
"k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1lister "k8s.io/client-go/listers/core/v1" +) + +// this is a list of namespaces with special meaning. The kube ones are here in particular because +// we don't control their creation or labeling on their creation +var runLevelZeroNamespaces = sets.NewString("default", "kube-system", "kube-public") +var runLevelOneNamespaces = sets.NewString("openshift-node", "openshift-infra", "openshift") + +func init() { + runLevelOneNamespaces.Insert(runLevelZeroNamespaces.List()...) +} + +// NamespaceLabelConditions provides a decorator that can delegate and conditionally add label conditions +type NamespaceLabelConditions struct { + NamespaceClient corev1client.NamespacesGetter + NamespaceLister corev1lister.NamespaceLister + + SkipLevelZeroNames sets.String + SkipLevelOneNames sets.String +} + +func (d *NamespaceLabelConditions) WithNamespaceLabelConditions(admissionPlugin admission.Interface, name string) admission.Interface { + switch { + case d.SkipLevelOneNames.Has(name): + // return a decorated admission plugin that skips runlevel 0 and 1 namespaces based on name (for known values) and + // label. + return &pluginHandlerWithNamespaceNameConditions{ + admissionPlugin: &pluginHandlerWithNamespaceLabelConditions{ + admissionPlugin: admissionPlugin, + namespaceClient: d.NamespaceClient, + namespaceLister: d.NamespaceLister, + namespaceSelector: skipRunLevelOneSelector, + }, + namespacesToExclude: runLevelOneNamespaces, + } + + case d.SkipLevelZeroNames.Has(name): + // return a decorated admission plugin that skips runlevel 0 namespaces based on name (for known values) and + // label. + return &pluginHandlerWithNamespaceNameConditions{ + admissionPlugin: &pluginHandlerWithNamespaceLabelConditions{ + admissionPlugin: admissionPlugin, + namespaceClient: d.NamespaceClient, + namespaceLister: d.NamespaceLister, + namespaceSelector: skipRunLevelZeroSelector, + }, + namespacesToExclude: runLevelZeroNamespaces, + } + + default: + return admissionPlugin + } +} + +// NamespaceLabelSelector provides a decorator that delegates +type NamespaceLabelSelector struct { + namespaceClient corev1client.NamespacesGetter + namespaceLister corev1lister.NamespaceLister + + admissionPluginNamesToDecorate sets.String + namespaceLabelSelector labels.Selector +} + +func NewConditionalAdmissionPlugins(nsClient corev1client.NamespacesGetter, nsLister corev1lister.NamespaceLister, nsSelector labels.Selector, admissionPluginNames ...string) *NamespaceLabelSelector { + return &NamespaceLabelSelector{ + namespaceClient: nsClient, + namespaceLister: nsLister, + admissionPluginNamesToDecorate: sets.NewString(admissionPluginNames...), + namespaceLabelSelector: nsSelector, + } +} + +func (d *NamespaceLabelSelector) WithNamespaceLabelSelector(admissionPlugin admission.Interface, name string) admission.Interface { + if !d.admissionPluginNamesToDecorate.Has(name) { + return admissionPlugin + } + + return &pluginHandlerWithNamespaceLabelConditions{ + admissionPlugin: admissionPlugin, + namespaceClient: d.namespaceClient, + namespaceLister: d.namespaceLister, + namespaceSelector: d.namespaceLabelSelector, + } +} diff --git a/openshift-kube-apiserver/admission/namespaceconditions/labelcondition.go b/openshift-kube-apiserver/admission/namespaceconditions/labelcondition.go new file mode 100644 index 0000000000000..c3ebaf5895306 --- /dev/null +++ 
b/openshift-kube-apiserver/admission/namespaceconditions/labelcondition.go @@ -0,0 +1,125 @@ +package namespaceconditions + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apiserver/pkg/admission" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1lister "k8s.io/client-go/listers/core/v1" +) + +const runLevelLabel = "openshift.io/run-level" + +var ( + skipRunLevelZeroSelector labels.Selector + skipRunLevelOneSelector labels.Selector +) + +func init() { + var err error + skipRunLevelZeroSelector, err = labels.Parse(runLevelLabel + " notin ( 0 )") + if err != nil { + panic(err) + } + skipRunLevelOneSelector, err = labels.Parse(runLevelLabel + " notin ( 0,1 )") + if err != nil { + panic(err) + } +} + +// pluginHandlerWithNamespaceLabelConditions wraps an admission plugin in a conditional skip based on namespace labels +type pluginHandlerWithNamespaceLabelConditions struct { + admissionPlugin admission.Interface + namespaceClient corev1client.NamespacesGetter + namespaceLister corev1lister.NamespaceLister + namespaceSelector labels.Selector +} + +var _ admission.ValidationInterface = &pluginHandlerWithNamespaceLabelConditions{} +var _ admission.MutationInterface = &pluginHandlerWithNamespaceLabelConditions{} + +func (p pluginHandlerWithNamespaceLabelConditions) Handles(operation admission.Operation) bool { + return p.admissionPlugin.Handles(operation) +} + +// Admit performs a mutating admission control check and emits metrics. +func (p pluginHandlerWithNamespaceLabelConditions) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + if !p.shouldRunAdmission(a) { + return nil + } + + mutatingHandler, ok := p.admissionPlugin.(admission.MutationInterface) + if !ok { + return nil + } + return mutatingHandler.Admit(ctx, a, o) +} + +// Validate performs a non-mutating admission control check and emits metrics. +func (p pluginHandlerWithNamespaceLabelConditions) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + if !p.shouldRunAdmission(a) { + return nil + } + + validatingHandler, ok := p.admissionPlugin.(admission.ValidationInterface) + if !ok { + return nil + } + return validatingHandler.Validate(ctx, a, o) +} + +// shouldRunAdmission decides whether the request matches the +// namespaceSelector of the decorated plugin. Only when they match is the plugin called. +func (p pluginHandlerWithNamespaceLabelConditions) shouldRunAdmission(attr admission.Attributes) bool { + namespaceName := attr.GetNamespace() + if len(namespaceName) == 0 && attr.GetResource().Resource != "namespaces" { + // cluster scoped resources always run admission + return true + } + namespaceLabels, err := p.getNamespaceLabels(attr) + if err != nil { + // default to running the plugin so we don't leak namespace existence information + return true + } + // TODO: add an LRU cache for the match decision + return p.namespaceSelector.Matches(labels.Set(namespaceLabels)) +}
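+ +// An illustrative sketch (not part of the handler) of how the selectors parsed in init behave: +// a namespace labeled "openshift.io/run-level": "1" matches skipRunLevelZeroSelector (its run-level +// is not 0, so admission runs) but not skipRunLevelOneSelector (its run-level is in {0, 1}, so +// shouldRunAdmission returns false and admission is skipped).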
+ +// getNamespaceLabels gets the labels of the namespace related to the attr. +func (p pluginHandlerWithNamespaceLabelConditions) getNamespaceLabels(attr admission.Attributes) (map[string]string, error) { + // If the request itself is creating or updating a namespace, then get the + // labels from attr.Object, because namespaceLister doesn't have the latest + // namespace yet. + + // However, if the request is deleting a namespace, then get the labels from + // the namespace in the namespaceLister, because a delete request is not + // going to change the object, and attr.Object will be a DeleteOptions + // rather than a namespace object. + if attr.GetResource().Resource == "namespaces" && + len(attr.GetSubresource()) == 0 && + (attr.GetOperation() == admission.Create || attr.GetOperation() == admission.Update) { + accessor, err := meta.Accessor(attr.GetObject()) + if err != nil { + return nil, err + } + return accessor.GetLabels(), nil + } + + namespaceName := attr.GetNamespace() + namespace, err := p.namespaceLister.Get(namespaceName) + if err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + if apierrors.IsNotFound(err) { + // in case of latency in our caches, make a call directly to storage to verify that it truly exists or not + namespace, err = p.namespaceClient.Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + } + return namespace.Labels, nil +} diff --git a/openshift-kube-apiserver/admission/namespaceconditions/labelcondition_test.go b/openshift-kube-apiserver/admission/namespaceconditions/labelcondition_test.go new file mode 100644 index 0000000000000..31474a4b7ee93 --- /dev/null +++ b/openshift-kube-apiserver/admission/namespaceconditions/labelcondition_test.go @@ -0,0 +1,97 @@ +package namespaceconditions + +import ( + "reflect" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +type fakeNamespaceLister struct { + namespaces map[string]*corev1.Namespace +} + +func (f fakeNamespaceLister) List(selector labels.Selector) (ret []*corev1.Namespace, err error) { + return nil, nil +} +func (f fakeNamespaceLister) Get(name string) (*corev1.Namespace, error) { + ns, ok := f.namespaces[name] + if ok { + return ns, nil + } + return nil, errors.NewNotFound(corev1.Resource("namespaces"), name) +} + +func TestGetNamespaceLabels(t *testing.T) { + namespace1Labels := map[string]string{ + "runlevel": "1", + } + namespace1 := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "1", + Labels: namespace1Labels, + }, + } + namespace2Labels := map[string]string{ + "runlevel": "2", + } + namespace2 := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "2", + Labels: namespace2Labels, + }, + } + namespaceLister := fakeNamespaceLister{map[string]*corev1.Namespace{ + "1": &namespace1, + }, + } + + tests := []struct { + name string + attr admission.Attributes + expectedLabels map[string]string + }{ + { + name: "request is for creating namespace, the labels should be from the object itself", + attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, "", namespace2.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Create, nil, false, nil), + expectedLabels: namespace2Labels, + }, + { + name: "request is for updating namespace, the labels should be from the new object", + attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, namespace2.Name, namespace2.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Update, nil, false, nil), + expectedLabels: namespace2Labels, + }, + { + name: "request is for deleting namespace, the labels should be from the cache", + attr: admission.NewAttributesRecord(&namespace2,
nil, schema.GroupVersionKind{}, namespace1.Name, namespace1.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Delete, nil, false, nil), + expectedLabels: namespace1Labels, + }, + { + name: "request is for namespace/finalizer", + attr: admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, namespace1.Name, "mock-name", schema.GroupVersionResource{Resource: "namespaces"}, "finalizers", admission.Create, nil, false, nil), + expectedLabels: namespace1Labels, + }, + { + name: "request is for pod", + attr: admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, namespace1.Name, "mock-name", schema.GroupVersionResource{Resource: "pods"}, "", admission.Create, nil, false, nil), + expectedLabels: namespace1Labels, + }, + } + matcher := pluginHandlerWithNamespaceLabelConditions{ + namespaceLister: namespaceLister, + } + for _, tt := range tests { + actualLabels, err := matcher.getNamespaceLabels(tt.attr) + if err != nil { + t.Errorf("%s: %v", tt.name, err) + } + if !reflect.DeepEqual(actualLabels, tt.expectedLabels) { + t.Errorf("%s: expected labels to be %#v, got %#v", tt.name, tt.expectedLabels, actualLabels) + } + } +} diff --git a/openshift-kube-apiserver/admission/namespaceconditions/namecondition.go b/openshift-kube-apiserver/admission/namespaceconditions/namecondition.go new file mode 100644 index 0000000000000..848cef4d13ac4 --- /dev/null +++ b/openshift-kube-apiserver/admission/namespaceconditions/namecondition.go @@ -0,0 +1,60 @@ +package namespaceconditions + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" +) + +// pluginHandlerWithNamespaceNameConditions skips running the wrapped admission plugin for namespaces in the namespacesToExclude list +type pluginHandlerWithNamespaceNameConditions struct { + admissionPlugin admission.Interface + namespacesToExclude sets.String +} + +var _ admission.ValidationInterface = &pluginHandlerWithNamespaceNameConditions{} +var _ admission.MutationInterface = &pluginHandlerWithNamespaceNameConditions{} + +func (p pluginHandlerWithNamespaceNameConditions) Handles(operation admission.Operation) bool { + return p.admissionPlugin.Handles(operation) +} + +// Admit performs a mutating admission control check and emits metrics. +func (p pluginHandlerWithNamespaceNameConditions) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + if !p.shouldRunAdmission(a) { + return nil + } + + mutatingHandler, ok := p.admissionPlugin.(admission.MutationInterface) + if !ok { + return nil + } + return mutatingHandler.Admit(ctx, a, o) +}
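+ +// An illustrative sketch (not part of the handler): with namespacesToExclude = runLevelZeroNamespaces, +// a request for a Pod in "kube-system" is skipped, and so is a request against the "kube-system" +// Namespace object itself (matched by attr.GetName() in shouldRunAdmission below), while a request +// in "myproject" falls through to the wrapped plugin.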
+ +// Validate performs a non-mutating admission control check and emits metrics. +func (p pluginHandlerWithNamespaceNameConditions) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + if !p.shouldRunAdmission(a) { + return nil + } + + validatingHandler, ok := p.admissionPlugin.(admission.ValidationInterface) + if !ok { + return nil + } + return validatingHandler.Validate(ctx, a, o) +} + +func (p pluginHandlerWithNamespaceNameConditions) shouldRunAdmission(attr admission.Attributes) bool { + namespaceName := attr.GetNamespace() + if p.namespacesToExclude.Has(namespaceName) { + return false + } + if (attr.GetResource().GroupResource() == schema.GroupResource{Resource: "namespaces"}) && p.namespacesToExclude.Has(attr.GetName()) { + return false + } + + return true +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/doc.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/doc.go new file mode 100644 index 0000000000000..4ef9330be1224 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package externalipranger is the internal version of the API. +package externalipranger diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/register.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/register.go new file mode 100644 index 0000000000000..fe92abf523c1e --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/register.go @@ -0,0 +1,20 @@ +package externalipranger + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var GroupVersion = schema.GroupVersion{Group: "network.openshift.io", Version: runtime.APIVersionInternal} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ExternalIPRangerAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/types.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/types.go new file mode 100644 index 0000000000000..f127ca27aadcb --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/types.go @@ -0,0 +1,20 @@ +package externalipranger + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExternalIPRangerAdmissionConfig is the configuration for which CIDRs services can't manage +type ExternalIPRangerAdmissionConfig struct { + metav1.TypeMeta + + // ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP + // may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that + // CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You + // should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons.
+ ExternalIPNetworkCIDRs []string + // AllowIngressIP indicates that ingress IPs should be allowed + AllowIngressIP bool +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/doc.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/doc.go new file mode 100644 index 0000000000000..79476f394930a --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/externalipranger + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/register.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/register.go new file mode 100644 index 0000000000000..f55b5a5b494df --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/register.go @@ -0,0 +1,24 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints" +) + +var GroupVersion = schema.GroupVersion{Group: "network.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + restrictedendpoints.Install, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ExternalIPRangerAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/types.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/types.go new file mode 100644 index 0000000000000..0fb8ea4ca830a --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/types.go @@ -0,0 +1,20 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExternalIPRangerAdmissionConfig is the configuration for which CIDRs services can't manage +type ExternalIPRangerAdmissionConfig struct { + metav1.TypeMeta `json:",inline"` + + // ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP + // may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that + // CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You + // should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons. + ExternalIPNetworkCIDRs []string `json:"externalIPNetworkCIDRs"` + // AllowIngressIP indicates that ingress IPs should be allowed + AllowIngressIP bool `json:"allowIngressIP"` +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/zz_generated.conversion.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/zz_generated.conversion.go new file mode 100644 index 0000000000000..34c70eb5aadc4 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/zz_generated.conversion.go @@ -0,0 +1,72 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + externalipranger "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/externalipranger" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*ExternalIPRangerAdmissionConfig)(nil), (*externalipranger.ExternalIPRangerAdmissionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ExternalIPRangerAdmissionConfig_To_externalipranger_ExternalIPRangerAdmissionConfig(a.(*ExternalIPRangerAdmissionConfig), b.(*externalipranger.ExternalIPRangerAdmissionConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*externalipranger.ExternalIPRangerAdmissionConfig)(nil), (*ExternalIPRangerAdmissionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_externalipranger_ExternalIPRangerAdmissionConfig_To_v1_ExternalIPRangerAdmissionConfig(a.(*externalipranger.ExternalIPRangerAdmissionConfig), b.(*ExternalIPRangerAdmissionConfig), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_ExternalIPRangerAdmissionConfig_To_externalipranger_ExternalIPRangerAdmissionConfig(in *ExternalIPRangerAdmissionConfig, out *externalipranger.ExternalIPRangerAdmissionConfig, s conversion.Scope) error { + out.ExternalIPNetworkCIDRs = *(*[]string)(unsafe.Pointer(&in.ExternalIPNetworkCIDRs)) + out.AllowIngressIP = in.AllowIngressIP + return nil +} + +// Convert_v1_ExternalIPRangerAdmissionConfig_To_externalipranger_ExternalIPRangerAdmissionConfig is an autogenerated conversion function. +func Convert_v1_ExternalIPRangerAdmissionConfig_To_externalipranger_ExternalIPRangerAdmissionConfig(in *ExternalIPRangerAdmissionConfig, out *externalipranger.ExternalIPRangerAdmissionConfig, s conversion.Scope) error { + return autoConvert_v1_ExternalIPRangerAdmissionConfig_To_externalipranger_ExternalIPRangerAdmissionConfig(in, out, s) +} + +func autoConvert_externalipranger_ExternalIPRangerAdmissionConfig_To_v1_ExternalIPRangerAdmissionConfig(in *externalipranger.ExternalIPRangerAdmissionConfig, out *ExternalIPRangerAdmissionConfig, s conversion.Scope) error { + out.ExternalIPNetworkCIDRs = *(*[]string)(unsafe.Pointer(&in.ExternalIPNetworkCIDRs)) + out.AllowIngressIP = in.AllowIngressIP + return nil +} + +// Convert_externalipranger_ExternalIPRangerAdmissionConfig_To_v1_ExternalIPRangerAdmissionConfig is an autogenerated conversion function. 
+func Convert_externalipranger_ExternalIPRangerAdmissionConfig_To_v1_ExternalIPRangerAdmissionConfig(in *externalipranger.ExternalIPRangerAdmissionConfig, out *ExternalIPRangerAdmissionConfig, s conversion.Scope) error { + return autoConvert_externalipranger_ExternalIPRangerAdmissionConfig_To_v1_ExternalIPRangerAdmissionConfig(in, out, s) +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..7e5bf419ad07c --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/zz_generated.deepcopy.go @@ -0,0 +1,56 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPRangerAdmissionConfig) DeepCopyInto(out *ExternalIPRangerAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ExternalIPNetworkCIDRs != nil { + in, out := &in.ExternalIPNetworkCIDRs, &out.ExternalIPNetworkCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPRangerAdmissionConfig. +func (in *ExternalIPRangerAdmissionConfig) DeepCopy() *ExternalIPRangerAdmissionConfig { + if in == nil { + return nil + } + out := new(ExternalIPRangerAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalIPRangerAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..b551e55df1d0e --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/zz_generated.deepcopy.go @@ -0,0 +1,56 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package externalipranger + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPRangerAdmissionConfig) DeepCopyInto(out *ExternalIPRangerAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ExternalIPNetworkCIDRs != nil { + in, out := &in.ExternalIPNetworkCIDRs, &out.ExternalIPNetworkCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPRangerAdmissionConfig. +func (in *ExternalIPRangerAdmissionConfig) DeepCopy() *ExternalIPRangerAdmissionConfig { + if in == nil { + return nil + } + out := new(ExternalIPRangerAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalIPRangerAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/doc.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/doc.go new file mode 100644 index 0000000000000..ff46fb9f13d76 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package restrictedendpoints is the internal version of the API. +package restrictedendpoints diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/register.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/register.go new file mode 100644 index 0000000000000..171a4b1be5182 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/register.go @@ -0,0 +1,20 @@ +package restrictedendpoints + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var GroupVersion = schema.GroupVersion{Group: "network.openshift.io", Version: runtime.APIVersionInternal} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &RestrictedEndpointsAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/types.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/types.go new file mode 100644 index 0000000000000..e205762215ba1 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/types.go @@ -0,0 +1,15 @@ +package restrictedendpoints + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RestrictedEndpointsAdmissionConfig is the configuration for which CIDRs services can't manage +type RestrictedEndpointsAdmissionConfig struct { + metav1.TypeMeta + + // RestrictedCIDRs indicates what CIDRs will be disallowed for services. 
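+	//
+	// Illustrative values only (a sketch, not part of the original patch): an
+	// administrator who wants to keep endpoints from pointing into the cluster
+	// pod or service networks might configure, for example,
+	// RestrictedCIDRs: []string{"10.128.0.0/14", "172.30.0.0/16"}.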
+ RestrictedCIDRs []string +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/doc.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/doc.go new file mode 100644 index 0000000000000..0dac22208df49 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/register.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/register.go new file mode 100644 index 0000000000000..f924353fe24d3 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/register.go @@ -0,0 +1,24 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints" +) + +var GroupVersion = schema.GroupVersion{Group: "network.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + restrictedendpoints.Install, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &RestrictedEndpointsAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/types.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/types.go new file mode 100644 index 0000000000000..f665aa1e73c2f --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/types.go @@ -0,0 +1,15 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RestrictedEndpointsAdmissionConfig is the configuration for which CIDRs services can't manage +type RestrictedEndpointsAdmissionConfig struct { + metav1.TypeMeta `json:",inline"` + + // RestrictedCIDRs indicates what CIDRs will be disallowed for services. + RestrictedCIDRs []string `json:"restrictedCIDRs"` +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/zz_generated.conversion.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/zz_generated.conversion.go new file mode 100644 index 0000000000000..0dd1360d0607a --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/zz_generated.conversion.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. 
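+
+// Note: the unsafe.Pointer casts below are conversion-gen's way of copying
+// slice fields whose v1 and internal layouts are identical: the slice header
+// is reinterpreted in place rather than reallocated. A safe (but allocating)
+// equivalent of the generated conversion would be roughly:
+//
+//	out.RestrictedCIDRs = append([]string(nil), in.RestrictedCIDRs...)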
+ +package v1 + +import ( + unsafe "unsafe" + + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + restrictedendpoints "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*RestrictedEndpointsAdmissionConfig)(nil), (*restrictedendpoints.RestrictedEndpointsAdmissionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RestrictedEndpointsAdmissionConfig_To_restrictedendpoints_RestrictedEndpointsAdmissionConfig(a.(*RestrictedEndpointsAdmissionConfig), b.(*restrictedendpoints.RestrictedEndpointsAdmissionConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*restrictedendpoints.RestrictedEndpointsAdmissionConfig)(nil), (*RestrictedEndpointsAdmissionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_restrictedendpoints_RestrictedEndpointsAdmissionConfig_To_v1_RestrictedEndpointsAdmissionConfig(a.(*restrictedendpoints.RestrictedEndpointsAdmissionConfig), b.(*RestrictedEndpointsAdmissionConfig), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_RestrictedEndpointsAdmissionConfig_To_restrictedendpoints_RestrictedEndpointsAdmissionConfig(in *RestrictedEndpointsAdmissionConfig, out *restrictedendpoints.RestrictedEndpointsAdmissionConfig, s conversion.Scope) error { + out.RestrictedCIDRs = *(*[]string)(unsafe.Pointer(&in.RestrictedCIDRs)) + return nil +} + +// Convert_v1_RestrictedEndpointsAdmissionConfig_To_restrictedendpoints_RestrictedEndpointsAdmissionConfig is an autogenerated conversion function. +func Convert_v1_RestrictedEndpointsAdmissionConfig_To_restrictedendpoints_RestrictedEndpointsAdmissionConfig(in *RestrictedEndpointsAdmissionConfig, out *restrictedendpoints.RestrictedEndpointsAdmissionConfig, s conversion.Scope) error { + return autoConvert_v1_RestrictedEndpointsAdmissionConfig_To_restrictedendpoints_RestrictedEndpointsAdmissionConfig(in, out, s) +} + +func autoConvert_restrictedendpoints_RestrictedEndpointsAdmissionConfig_To_v1_RestrictedEndpointsAdmissionConfig(in *restrictedendpoints.RestrictedEndpointsAdmissionConfig, out *RestrictedEndpointsAdmissionConfig, s conversion.Scope) error { + out.RestrictedCIDRs = *(*[]string)(unsafe.Pointer(&in.RestrictedCIDRs)) + return nil +} + +// Convert_restrictedendpoints_RestrictedEndpointsAdmissionConfig_To_v1_RestrictedEndpointsAdmissionConfig is an autogenerated conversion function. 
+func Convert_restrictedendpoints_RestrictedEndpointsAdmissionConfig_To_v1_RestrictedEndpointsAdmissionConfig(in *restrictedendpoints.RestrictedEndpointsAdmissionConfig, out *RestrictedEndpointsAdmissionConfig, s conversion.Scope) error { + return autoConvert_restrictedendpoints_RestrictedEndpointsAdmissionConfig_To_v1_RestrictedEndpointsAdmissionConfig(in, out, s) +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..32b95e73bc29f --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/zz_generated.deepcopy.go @@ -0,0 +1,56 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestrictedEndpointsAdmissionConfig) DeepCopyInto(out *RestrictedEndpointsAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.RestrictedCIDRs != nil { + in, out := &in.RestrictedCIDRs, &out.RestrictedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestrictedEndpointsAdmissionConfig. +func (in *RestrictedEndpointsAdmissionConfig) DeepCopy() *RestrictedEndpointsAdmissionConfig { + if in == nil { + return nil + } + out := new(RestrictedEndpointsAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RestrictedEndpointsAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..e982f26d234f7 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/zz_generated.deepcopy.go @@ -0,0 +1,56 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package restrictedendpoints + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestrictedEndpointsAdmissionConfig) DeepCopyInto(out *RestrictedEndpointsAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.RestrictedCIDRs != nil { + in, out := &in.RestrictedCIDRs, &out.RestrictedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestrictedEndpointsAdmissionConfig. +func (in *RestrictedEndpointsAdmissionConfig) DeepCopy() *RestrictedEndpointsAdmissionConfig { + if in == nil { + return nil + } + out := new(RestrictedEndpointsAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RestrictedEndpointsAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission.go b/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission.go new file mode 100644 index 0000000000000..63c26a833c8f0 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission.go @@ -0,0 +1,209 @@ +package externalipranger + +import ( + "context" + "fmt" + "io" + "net" + "strings" + + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/klog/v2" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/externalipranger" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/externalipranger/v1" + kapi "k8s.io/kubernetes/pkg/apis/core" + netutils "k8s.io/utils/net" +) + +const ExternalIPPluginName = "network.openshift.io/ExternalIPRanger" + +func RegisterExternalIP(plugins *admission.Plugins) { + plugins.Register("network.openshift.io/ExternalIPRanger", + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", ExternalIPPluginName) + return nil, nil + } + + // this needs to be moved upstream to be part of core config + reject, admit, err := ParseRejectAdmitCIDRRules(pluginConfig.ExternalIPNetworkCIDRs) + if err != nil { + // should have been caught with validation + return nil, err + } + + return NewExternalIPRanger(reject, admit, pluginConfig.AllowIngressIP), nil + }) +} + +func readConfig(reader io.Reader) (*externalipranger.ExternalIPRangerAdmissionConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, externalipranger.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*externalipranger.ExternalIPRangerAdmissionConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + // No validation needed since config is just list of strings + return config, nil +} + +type 
externalIPRanger struct { + *admission.Handler + reject []*net.IPNet + admit []*net.IPNet + authorizer authorizer.Authorizer + allowIngressIP bool +} + +var _ admission.Interface = &externalIPRanger{} +var _ admission.ValidationInterface = &externalIPRanger{} +var _ = initializer.WantsAuthorizer(&externalIPRanger{}) + +// ParseRejectAdmitCIDRRules calculates a blacklist and whitelist from a list of string CIDR rules (treating +// a leading ! as a negation). Returns an error if any rule is invalid. +func ParseRejectAdmitCIDRRules(rules []string) (reject, admit []*net.IPNet, err error) { + for _, s := range rules { + negate := false + if strings.HasPrefix(s, "!") { + negate = true + s = s[1:] + } + _, cidr, err := netutils.ParseCIDRSloppy(s) + if err != nil { + return nil, nil, err + } + if negate { + reject = append(reject, cidr) + } else { + admit = append(admit, cidr) + } + } + return reject, admit, nil +} + +// NewExternalIPRanger creates a new external IP ranger admission plugin. +func NewExternalIPRanger(reject, admit []*net.IPNet, allowIngressIP bool) *externalIPRanger { + return &externalIPRanger{ + Handler: admission.NewHandler(admission.Create, admission.Update), + reject: reject, + admit: admit, + allowIngressIP: allowIngressIP, + } +} + +func (r *externalIPRanger) SetAuthorizer(a authorizer.Authorizer) { + r.authorizer = a +} + +func (r *externalIPRanger) ValidateInitialization() error { + if r.authorizer == nil { + return fmt.Errorf("missing authorizer") + } + return nil +} + +// NetworkSlice is a helper for checking whether an IP is contained in a range +// of networks. +type NetworkSlice []*net.IPNet + +func (s NetworkSlice) Contains(ip net.IP) bool { + for _, cidr := range s { + if cidr.Contains(ip) { + return true + } + } + return false +} + +// Validate determines if the service should be admitted based on the configured network CIDRs. +func (r *externalIPRanger) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) error { + if a.GetResource().GroupResource() != kapi.Resource("services") { + return nil + } + + svc, ok := a.GetObject().(*kapi.Service) + // if we can't convert then we don't handle this object so just return + if !ok { + return nil + } + + // Determine if an ingress ip address should be allowed as an + // external ip by checking the loadbalancer status of the previous + // object state. Only updates need to be validated against the + // ingress ip since the loadbalancer status cannot be set on + // create.
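+	//
+	// As a rough worked example: with ExternalIPNetworkCIDRs of
+	// ["!172.16.1.0/24", "172.16.0.0/16"], ParseRejectAdmitCIDRRules yields
+	// reject=[172.16.1.0/24] and admit=[172.16.0.0/16], so 172.16.2.5 is
+	// admitted, 172.16.1.5 is rejected by the negated rule, and 192.168.0.5
+	// is rejected because it matches no admit rule.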
+ ingressIP := "" + retrieveIngressIP := a.GetOperation() == admission.Update && + r.allowIngressIP && svc.Spec.Type == kapi.ServiceTypeLoadBalancer + if retrieveIngressIP { + old, ok := a.GetOldObject().(*kapi.Service) + ipPresent := ok && old != nil && len(old.Status.LoadBalancer.Ingress) > 0 + if ipPresent { + ingressIP = old.Status.LoadBalancer.Ingress[0].IP + } + } + + var errs field.ErrorList + switch { + // administrator disabled externalIPs + case len(svc.Spec.ExternalIPs) > 0 && len(r.admit) == 0: + onlyIngressIP := len(svc.Spec.ExternalIPs) == 1 && svc.Spec.ExternalIPs[0] == ingressIP + if !onlyIngressIP { + errs = append(errs, field.Forbidden(field.NewPath("spec", "externalIPs"), "externalIPs have been disabled")) + } + // administrator has limited the range + case len(svc.Spec.ExternalIPs) > 0 && len(r.admit) > 0: + for i, s := range svc.Spec.ExternalIPs { + ip := netutils.ParseIPSloppy(s) + if ip == nil { + errs = append(errs, field.Forbidden(field.NewPath("spec", "externalIPs").Index(i), "externalIPs must be a valid address")) + continue + } + notIngressIP := s != ingressIP + if (NetworkSlice(r.reject).Contains(ip) || !NetworkSlice(r.admit).Contains(ip)) && notIngressIP { + errs = append(errs, field.Forbidden(field.NewPath("spec", "externalIPs").Index(i), "externalIP is not allowed")) + continue + } + } + } + + if len(errs) > 0 { + //if there are errors reported, resort to RBAC check to see + //if this is an admin user who can over-ride the check + allow, err := r.checkAccess(ctx, a) + if err != nil { + return err + } + if !allow { + return admission.NewForbidden(a, errs.ToAggregate()) + } + } + + return nil +} + +func (r *externalIPRanger) checkAccess(ctx context.Context, attr admission.Attributes) (bool, error) { + authzAttr := authorizer.AttributesRecord{ + User: attr.GetUserInfo(), + Verb: "create", + Resource: "service", + Subresource: "externalips", + APIGroup: "network.openshift.io", + ResourceRequest: true, + } + authorized, _, err := r.authorizer.Authorize(ctx, authzAttr) + return authorized == authorizer.DecisionAllow, err +} diff --git a/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission_test.go b/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission_test.go new file mode 100644 index 0000000000000..c29e3abe4f01e --- /dev/null +++ b/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission_test.go @@ -0,0 +1,322 @@ +package externalipranger + +import ( + "context" + "fmt" + "net" + "strings" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + netutils "k8s.io/utils/net" + + "k8s.io/apiserver/pkg/authentication/user" + + "k8s.io/apiserver/pkg/authentication/serviceaccount" + kapi "k8s.io/kubernetes/pkg/apis/core" +) + +type fakeTestAuthorizer struct { + t *testing.T +} + +func fakeAuthorizer(t *testing.T) authorizer.Authorizer { + return &fakeTestAuthorizer{ + t: t, + } +} + +func (a *fakeTestAuthorizer) Authorize(_ context.Context, attributes authorizer.Attributes) (authorizer.Decision, string, error) { + ui := attributes.GetUser() + if ui == nil { + return authorizer.DecisionNoOpinion, "", fmt.Errorf("No valid UserInfo for Context") + } + // system:serviceaccount:test:admin user aka admin user is allowed to set + // external IPs + if ui.GetName() == "system:serviceaccount:test:admin" { + return authorizer.DecisionAllow, "", nil + } + // Non test:admin user aka without admin privileges: + 
return authorizer.DecisionDeny, "", nil +} + +// TestAdmission verifies admission and rejection of service external IPs across CIDR allow/reject configurations and user privileges +func TestAdmission(t *testing.T) { + svc := &kapi.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + } + var oldSvc *kapi.Service + + _, ipv4, err := netutils.ParseCIDRSloppy("172.0.0.0/16") + if err != nil { + t.Fatal(err) + } + _, ipv4subset, err := netutils.ParseCIDRSloppy("172.0.1.0/24") + if err != nil { + t.Fatal(err) + } + _, ipv4offset, err := netutils.ParseCIDRSloppy("172.200.0.0/24") + if err != nil { + t.Fatal(err) + } + _, none, err := netutils.ParseCIDRSloppy("0.0.0.0/32") + if err != nil { + t.Fatal(err) + } + _, all, err := netutils.ParseCIDRSloppy("0.0.0.0/0") + if err != nil { + t.Fatal(err) + } + + tests := []struct { + testName string + rejects, admits []*net.IPNet + op admission.Operation + externalIPs []string + admit bool + errFn func(err error) bool + loadBalancer bool + ingressIP string + userinfo user.Info + }{ + { + admit: true, + op: admission.Create, + testName: "No external IPs on create for test:ordinary-user user", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + op: admission.Update, + testName: "No external IPs on update for test:admin user", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: false, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "No external IPs allowed on create for test:ordinary-user user", + errFn: func(err error) bool { return strings.Contains(err.Error(), "externalIPs have been disabled") }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "External IPs allowed on create for test:admin user", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + { + admit: false, + externalIPs: []string{"1.2.3.4"}, + op: admission.Update, + testName: "No external IPs allowed on update", + errFn: func(err error) bool { return strings.Contains(err.Error(), "externalIPs have been disabled") }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + externalIPs: []string{"1.2.3.4"}, + op: admission.Update, + testName: "External IPs allowed on update for test:admin user", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + { + admit: false, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "IP out of range on create", + errFn: func(err error) bool { + return strings.Contains(err.Error(), "externalIP is not allowed") && + strings.Contains(err.Error(), "spec.externalIPs[0]") + }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: false, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Update, + testName: "IP out of range on update", + errFn: func(err error) bool { + return strings.Contains(err.Error(), "externalIP is not allowed") && + strings.Contains(err.Error(), "spec.externalIPs[0]") + }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: false, + admits: []*net.IPNet{ipv4}, + rejects: []*net.IPNet{ipv4subset}, + externalIPs: []string{"172.0.1.1"}, + op: admission.Update, + testName: "IP out of range due to blacklist", + errFn: func(err error) bool { + return strings.Contains(err.Error(), "externalIP is not allowed") && + strings.Contains(err.Error(), "spec.externalIPs[0]") + }, + userinfo:
serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: false, + admits: []*net.IPNet{ipv4}, + rejects: []*net.IPNet{ipv4offset}, + externalIPs: []string{"172.199.1.1"}, + op: admission.Update, + testName: "IP not in reject or admit", + errFn: func(err error) bool { + return strings.Contains(err.Error(), "externalIP is not allowed") && + strings.Contains(err.Error(), "spec.externalIPs[0]") + }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"172.0.0.1"}, + op: admission.Create, + testName: "IP in range on create for test:ordinary-user user", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"172.0.0.1"}, + op: admission.Update, + testName: "IP in range on update for test:admin user", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + // other checks + { + admit: false, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"abcd"}, + op: admission.Create, + testName: "IP unparseable on create", + errFn: func(err error) bool { + return strings.Contains(err.Error(), "externalIPs must be a valid address") && + strings.Contains(err.Error(), "spec.externalIPs[0]") + }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: false, + admits: []*net.IPNet{none}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "IP range is empty for test:ordinary-user user", + errFn: func(err error) bool { return strings.Contains(err.Error(), "externalIP is not allowed") }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + admits: []*net.IPNet{none}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "IP range is empty, but test:admin user allowed", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + { + admit: false, + rejects: []*net.IPNet{all}, + admits: []*net.IPNet{all}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "rejections can cover the entire range", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + // Ingress IP checks + { + admit: true, + externalIPs: []string{"1.2.3.4"}, + op: admission.Update, + testName: "Ingress ip allowed when external ips are disabled", + loadBalancer: true, + ingressIP: "1.2.3.4", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"1.2.3.4", "172.0.0.1"}, + op: admission.Update, + testName: "Ingress ip allowed when external ips are enabled", + loadBalancer: true, + ingressIP: "1.2.3.4", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + { + admit: false, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"1.2.3.4", "172.0.0.1"}, + op: admission.Update, + testName: "Ingress ip not allowed for non-lb service", + loadBalancer: false, + ingressIP: "1.2.3.4", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + } + for _, test := range tests { + svc.Spec.ExternalIPs = test.externalIPs + allowIngressIP := len(test.ingressIP) > 0 || test.loadBalancer + handler := NewExternalIPRanger(test.rejects, test.admits, allowIngressIP) + handler.SetAuthorizer(fakeAuthorizer(t)) + err := handler.ValidateInitialization() + if err != nil { + t.Errorf("%s: Got an error %s", test.testName, err) + continue + } + if test.loadBalancer { + svc.Spec.Type = kapi.ServiceTypeLoadBalancer + } 
else { + svc.Spec.Type = kapi.ServiceTypeClusterIP + } + + if len(test.ingressIP) > 0 { + // Provide an ingress ip via the previous object state + oldSvc = &kapi.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Status: kapi.ServiceStatus{ + LoadBalancer: kapi.LoadBalancerStatus{ + Ingress: []kapi.LoadBalancerIngress{ + { + IP: test.ingressIP, + }, + }, + }, + }, + } + + } else { + oldSvc = nil + } + + err = handler.Validate(context.TODO(), admission.NewAttributesRecord(svc, oldSvc, kapi.Kind("Service").WithVersion("version"), "namespace", svc.ObjectMeta.Name, kapi.Resource("services").WithVersion("version"), "", test.op, nil, false, test.userinfo), nil) + + if test.admit && err != nil { + t.Errorf("%s: expected no error but got: %s", test.testName, err) + } else if !test.admit && err == nil { + t.Errorf("%s: expected an error", test.testName) + } + if test.errFn != nil && !test.errFn(err) { + t.Errorf("%s: unexpected error: %v", test.testName, err) + } + } +} + +func TestHandles(t *testing.T) { + for op, shouldHandle := range map[admission.Operation]bool{ + admission.Create: true, + admission.Update: true, + admission.Connect: false, + admission.Delete: false, + } { + ranger := NewExternalIPRanger(nil, nil, false) + if e, a := shouldHandle, ranger.Handles(op); e != a { + t.Errorf("%v: shouldHandle=%t, handles=%t", op, e, a) + } + } +} diff --git a/openshift-kube-apiserver/admission/network/restrictedendpoints/endpoint_admission.go b/openshift-kube-apiserver/admission/network/restrictedendpoints/endpoint_admission.go new file mode 100644 index 0000000000000..b61b2a0bd62ed --- /dev/null +++ b/openshift-kube-apiserver/admission/network/restrictedendpoints/endpoint_admission.go @@ -0,0 +1,292 @@ +package restrictedendpoints + +import ( + "context" + "fmt" + "io" + "net" + "reflect" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/klog/v2" + kapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/discovery" + netutils "k8s.io/utils/net" + + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1" +) + +const RestrictedEndpointsPluginName = "network.openshift.io/RestrictedEndpointsAdmission" + +func RegisterRestrictedEndpoints(plugins *admission.Plugins) { + plugins.Register(RestrictedEndpointsPluginName, + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", RestrictedEndpointsPluginName) + return nil, nil + } + restrictedNetworks, err := ParseSimpleCIDRRules(pluginConfig.RestrictedCIDRs) + if err != nil { + // should have been caught with validation + return nil, err + } + + return NewRestrictedEndpointsAdmission(restrictedNetworks), nil + }) +} + +func readConfig(reader io.Reader) (*restrictedendpoints.RestrictedEndpointsAdmissionConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, restrictedendpoints.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*restrictedendpoints.RestrictedEndpointsAdmissionConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + // No validation needed since config is just 
list of strings + return config, nil +} + +type restrictedEndpointsAdmission struct { + *admission.Handler + + authorizer authorizer.Authorizer + restrictedNetworks []*net.IPNet +} + +var _ = initializer.WantsAuthorizer(&restrictedEndpointsAdmission{}) +var _ = admission.ValidationInterface(&restrictedEndpointsAdmission{}) + +// ParseSimpleCIDRRules parses a list of CIDR strings +func ParseSimpleCIDRRules(rules []string) (networks []*net.IPNet, err error) { + for _, s := range rules { + _, cidr, err := netutils.ParseCIDRSloppy(s) + if err != nil { + return nil, err + } + networks = append(networks, cidr) + } + return networks, nil +} + +// NewRestrictedEndpointsAdmission creates a new endpoints admission plugin. +func NewRestrictedEndpointsAdmission(restrictedNetworks []*net.IPNet) *restrictedEndpointsAdmission { + return &restrictedEndpointsAdmission{ + Handler: admission.NewHandler(admission.Create, admission.Update), + restrictedNetworks: restrictedNetworks, + } +} + +func (r *restrictedEndpointsAdmission) SetAuthorizer(a authorizer.Authorizer) { + r.authorizer = a +} + +func (r *restrictedEndpointsAdmission) ValidateInitialization() error { + if r.authorizer == nil { + return fmt.Errorf("missing authorizer") + } + return nil +} + +var ( + defaultRestrictedPorts = []kapi.EndpointPort{ + // MCS ports + {Protocol: kapi.ProtocolTCP, Port: 22623}, + {Protocol: kapi.ProtocolTCP, Port: 22624}, + } + defaultRestrictedNetworks = []*net.IPNet{ + // IPv4 link-local range 169.254.0.0/16 (including cloud metadata IP) + {IP: netutils.ParseIPSloppy("169.254.0.0"), Mask: net.CIDRMask(16, 32)}, + } +) + +func checkRestrictedIP(ipString string, restricted []*net.IPNet) error { + ip := netutils.ParseIPSloppy(ipString) + if ip == nil { + return nil + } + for _, net := range restricted { + if net.Contains(ip) { + return fmt.Errorf("endpoint address %s is not allowed", ipString) + } + } + return nil +} + +func checkRestrictedPort(protocol kapi.Protocol, port int32, restricted []kapi.EndpointPort) error { + for _, rport := range restricted { + if protocol == rport.Protocol && port == rport.Port { + return fmt.Errorf("endpoint port %s:%d is not allowed", protocol, port) + } + } + return nil +} + +func (r *restrictedEndpointsAdmission) endpointsFindRestrictedIP(ep *kapi.Endpoints, restricted []*net.IPNet) error { + for _, subset := range ep.Subsets { + for _, addr := range subset.Addresses { + if err := checkRestrictedIP(addr.IP, restricted); err != nil { + return err + } + } + for _, addr := range subset.NotReadyAddresses { + if err := checkRestrictedIP(addr.IP, restricted); err != nil { + return err + } + } + } + return nil +} + +func (r *restrictedEndpointsAdmission) endpointsFindRestrictedPort(ep *kapi.Endpoints, restricted []kapi.EndpointPort) error { + for _, subset := range ep.Subsets { + for _, port := range subset.Ports { + if err := checkRestrictedPort(port.Protocol, port.Port, restricted); err != nil { + return err + } + } + } + return nil +} + +func (r *restrictedEndpointsAdmission) endpointsCheckAccess(ctx context.Context, attr admission.Attributes) (bool, error) { + authzAttr := authorizer.AttributesRecord{ + User: attr.GetUserInfo(), + Verb: "create", + Namespace: attr.GetNamespace(), + Resource: "endpoints", + Subresource: "restricted", + APIGroup: kapi.GroupName, + Name: attr.GetName(), + ResourceRequest: true, + } + authorized, _, err := r.authorizer.Authorize(ctx, authzAttr) + return authorized == authorizer.DecisionAllow, err +} + +func (r *restrictedEndpointsAdmission) 
endpointsValidate(ctx context.Context, a admission.Attributes) error { + ep, ok := a.GetObject().(*kapi.Endpoints) + if !ok { + return nil + } + old, ok := a.GetOldObject().(*kapi.Endpoints) + if ok && reflect.DeepEqual(ep.Subsets, old.Subsets) { + return nil + } + + restrictedErr := r.endpointsFindRestrictedIP(ep, r.restrictedNetworks) + if restrictedErr == nil { + restrictedErr = r.endpointsFindRestrictedIP(ep, defaultRestrictedNetworks) + } + if restrictedErr == nil { + restrictedErr = r.endpointsFindRestrictedPort(ep, defaultRestrictedPorts) + } + if restrictedErr == nil { + return nil + } + + allow, err := r.endpointsCheckAccess(ctx, a) + if err != nil { + return err + } + if !allow { + return admission.NewForbidden(a, restrictedErr) + } + return nil +} + +func (r *restrictedEndpointsAdmission) sliceFindRestrictedIP(slice *discovery.EndpointSlice, restricted []*net.IPNet) error { + for _, endpoint := range slice.Endpoints { + for _, addr := range endpoint.Addresses { + if err := checkRestrictedIP(addr, restricted); err != nil { + return err + } + } + } + return nil +} + +func (r *restrictedEndpointsAdmission) sliceFindRestrictedPort(slice *discovery.EndpointSlice, restricted []kapi.EndpointPort) error { + for _, port := range slice.Ports { + if port.Port == nil { + continue + } + sliceProtocol := kapi.ProtocolTCP + if port.Protocol != nil { + sliceProtocol = *port.Protocol + } + if err := checkRestrictedPort(sliceProtocol, *port.Port, restricted); err != nil { + return err + } + } + return nil +} + +func (r *restrictedEndpointsAdmission) sliceCheckAccess(ctx context.Context, attr admission.Attributes) (bool, error) { + authzAttr := authorizer.AttributesRecord{ + User: attr.GetUserInfo(), + Verb: "create", + Namespace: attr.GetNamespace(), + Resource: "endpointslices", + Subresource: "restricted", + APIGroup: discovery.GroupName, + Name: attr.GetName(), + ResourceRequest: true, + } + authorized, _, err := r.authorizer.Authorize(ctx, authzAttr) + return authorized == authorizer.DecisionAllow, err +} + +func (r *restrictedEndpointsAdmission) sliceValidate(ctx context.Context, a admission.Attributes) error { + slice, ok := a.GetObject().(*discovery.EndpointSlice) + if !ok { + return nil + } + old, ok := a.GetOldObject().(*discovery.EndpointSlice) + if ok && reflect.DeepEqual(slice.Endpoints, old.Endpoints) && reflect.DeepEqual(slice.Ports, old.Ports) { + return nil + } + + restrictedErr := r.sliceFindRestrictedIP(slice, r.restrictedNetworks) + if restrictedErr == nil { + restrictedErr = r.sliceFindRestrictedIP(slice, defaultRestrictedNetworks) + } + if restrictedErr == nil { + restrictedErr = r.sliceFindRestrictedPort(slice, defaultRestrictedPorts) + } + if restrictedErr == nil { + return nil + } + + allow, err := r.sliceCheckAccess(ctx, a) + if err != nil { + return err + } + if !allow { + return admission.NewForbidden(a, restrictedErr) + } + return nil +} + +// Validate determines if the endpoints or endpointslice object should be admitted +func (r *restrictedEndpointsAdmission) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) error { + if a.GetResource().GroupResource() == kapi.Resource("endpoints") { + return r.endpointsValidate(ctx, a) + } else if a.GetResource().GroupResource() == discovery.Resource("endpointslices") { + return r.sliceValidate(ctx, a) + } else { + return nil + } +} diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/doc.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/doc.go new file mode 
100644 index 0000000000000..1e09e2208b6a2 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package hostassignment is the internal version of the API. +package hostassignment diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/register.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/register.go new file mode 100644 index 0000000000000..d43ac830c6152 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/register.go @@ -0,0 +1,31 @@ +package hostassignment + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupVersion is the group version used to register these objects +var GroupVersion = schema.GroupVersion{Group: "route.openshift.io", Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return GroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return GroupVersion.WithResource(resource).GroupResource() +} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &HostAssignmentAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/types.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/types.go new file mode 100644 index 0000000000000..05b11cf541cac --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/types.go @@ -0,0 +1,17 @@ +package hostassignment + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HostAssignmentAdmissionConfig is the configuration for the route host assignment plugin. +type HostAssignmentAdmissionConfig struct { + metav1.TypeMeta + + // domain is used to generate a default host name for a route when the + // route's host name is empty. The generated host name will follow this + // pattern: "<name>.<namespace>.<domain>". + Domain string +} diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/doc.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/doc.go new file mode 100644 index 0000000000000..07ffba69df66e --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/hostassignment + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/register.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/register.go new file mode 100644 index 0000000000000..4db9b98bd4f31 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/register.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName specifies the group name used to register the objects. +const GroupName = "route.openshift.io" + +// GroupVersion specifies the group and the version used to register the objects. +var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1"} + +// SchemeGroupVersion is group version used to register these objects +// Deprecated: use GroupVersion instead. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // Deprecated: use Install instead + AddToScheme = localSchemeBuilder.AddToScheme + Install = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HostAssignmentAdmissionConfig{}, + ) + // AddToGroupVersion allows the serialization of client types like ListOptions. + v1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/types.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/types.go new file mode 100644 index 0000000000000..0537567d18355 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/types.go @@ -0,0 +1,17 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HostAssignmentAdmissionConfig is the configuration for the route host assignment plugin. +type HostAssignmentAdmissionConfig struct { + metav1.TypeMeta `json:",inline"` + + // domain is used to generate a default host name for a route when the + // route's host name is empty. The generated host name will follow this + // pattern: "<name>.<namespace>.<domain>". + Domain string `json:"domain"` +} diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/zz_generated.conversion.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/zz_generated.conversion.go new file mode 100644 index 0000000000000..fd998501e97fd --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/zz_generated.conversion.go @@ -0,0 +1,68 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + hostassignment "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/hostassignment" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*HostAssignmentAdmissionConfig)(nil), (*hostassignment.HostAssignmentAdmissionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_HostAssignmentAdmissionConfig_To_hostassignment_HostAssignmentAdmissionConfig(a.(*HostAssignmentAdmissionConfig), b.(*hostassignment.HostAssignmentAdmissionConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*hostassignment.HostAssignmentAdmissionConfig)(nil), (*HostAssignmentAdmissionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_hostassignment_HostAssignmentAdmissionConfig_To_v1_HostAssignmentAdmissionConfig(a.(*hostassignment.HostAssignmentAdmissionConfig), b.(*HostAssignmentAdmissionConfig), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_HostAssignmentAdmissionConfig_To_hostassignment_HostAssignmentAdmissionConfig(in *HostAssignmentAdmissionConfig, out *hostassignment.HostAssignmentAdmissionConfig, s conversion.Scope) error { + out.Domain = in.Domain + return nil +} + +// Convert_v1_HostAssignmentAdmissionConfig_To_hostassignment_HostAssignmentAdmissionConfig is an autogenerated conversion function. +func Convert_v1_HostAssignmentAdmissionConfig_To_hostassignment_HostAssignmentAdmissionConfig(in *HostAssignmentAdmissionConfig, out *hostassignment.HostAssignmentAdmissionConfig, s conversion.Scope) error { + return autoConvert_v1_HostAssignmentAdmissionConfig_To_hostassignment_HostAssignmentAdmissionConfig(in, out, s) +} + +func autoConvert_hostassignment_HostAssignmentAdmissionConfig_To_v1_HostAssignmentAdmissionConfig(in *hostassignment.HostAssignmentAdmissionConfig, out *HostAssignmentAdmissionConfig, s conversion.Scope) error { + out.Domain = in.Domain + return nil +} + +// Convert_hostassignment_HostAssignmentAdmissionConfig_To_v1_HostAssignmentAdmissionConfig is an autogenerated conversion function. 
+func Convert_hostassignment_HostAssignmentAdmissionConfig_To_v1_HostAssignmentAdmissionConfig(in *hostassignment.HostAssignmentAdmissionConfig, out *HostAssignmentAdmissionConfig, s conversion.Scope) error { + return autoConvert_hostassignment_HostAssignmentAdmissionConfig_To_v1_HostAssignmentAdmissionConfig(in, out, s) +} diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..ee31de89c12e9 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/zz_generated.deepcopy.go @@ -0,0 +1,51 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostAssignmentAdmissionConfig) DeepCopyInto(out *HostAssignmentAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAssignmentAdmissionConfig. +func (in *HostAssignmentAdmissionConfig) DeepCopy() *HostAssignmentAdmissionConfig { + if in == nil { + return nil + } + out := new(HostAssignmentAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostAssignmentAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..e24ccfb9a4e3e --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/zz_generated.deepcopy.go @@ -0,0 +1,51 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
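+
+// Note: because HostAssignmentAdmissionConfig has only scalar fields
+// (TypeMeta and Domain), the generated DeepCopyInto below is a plain value
+// copy (*out = *in); compare the CIDR-list configs earlier in this patch,
+// whose slice fields need an explicit make+copy so the copies do not share a
+// backing array.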
+ +package hostassignment + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostAssignmentAdmissionConfig) DeepCopyInto(out *HostAssignmentAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAssignmentAdmissionConfig. +func (in *HostAssignmentAdmissionConfig) DeepCopy() *HostAssignmentAdmissionConfig { + if in == nil { + return nil + } + out := new(HostAssignmentAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostAssignmentAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/doc.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/doc.go new file mode 100644 index 0000000000000..04727861a1ea1 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package ingressadmission is the internal version of the API. +package ingressadmission diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/register.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/register.go new file mode 100644 index 0000000000000..e0e84492781a6 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/register.go @@ -0,0 +1,33 @@ +package ingressadmission + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupVersion is the group version used to register these objects +var GroupVersion = schema.GroupVersion{Group: "route.openshift.io", Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return GroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return GroupVersion.WithResource(resource).GroupResource() +} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &IngressAdmissionConfig{}, + ) + return nil +} + +func (obj *IngressAdmissionConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/types.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/types.go new file mode 100644 index 0000000000000..bc1356398663c --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/types.go @@ -0,0 +1,22 @@ +package ingressadmission + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IngressAdmissionConfig is the configuration for the ingress +// controller limiter plugin. It changes the behavior of ingress +// objects to behave better with openshift routes and routers.
+// *NOTE* This has security implications in the router when handling +// ingress objects. +type IngressAdmissionConfig struct { + metav1.TypeMeta + + // AllowHostnameChanges: when false or unset, OpenShift does not + // allow changing or adding hostnames to ingress objects. If set + // to true then hostnames can be added or modified, which has + // security implications in the router. + AllowHostnameChanges bool +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/defaults_test.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/defaults_test.go new file mode 100644 index 0000000000000..e105c48094abc --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/defaults_test.go @@ -0,0 +1,59 @@ +package v1 + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/api/apitesting" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" +) + +func roundTrip(t *testing.T, obj runtime.Object) runtime.Object { + scheme, codecs := apitesting.SchemeForOrDie(Install) + data, err := runtime.Encode(codecs.LegacyCodec(GroupVersion), obj) + if err != nil { + t.Errorf("%v\n %#v", err, obj) + return nil + } + obj2, err := runtime.Decode(codecs.UniversalDecoder(), data) + if err != nil { + t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj) + return nil + } + obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object) + err = scheme.Convert(obj2, obj3, nil) + if err != nil { + t.Errorf("%v\nSource: %#v", err, obj2) + return nil + } + return obj3 +} + +func TestDefaults(t *testing.T) { + tests := []struct { + original *IngressAdmissionConfig + expected *IngressAdmissionConfig + }{ + { + original: &IngressAdmissionConfig{}, + expected: &IngressAdmissionConfig{ + AllowHostnameChanges: false, + }, + }, + } + for i, test := range tests { + t.Logf("test %d", i) + original := test.original + expected := test.expected + obj2 := roundTrip(t, runtime.Object(original)) + got, ok := obj2.(*IngressAdmissionConfig) + if !ok { + t.Errorf("unexpected object: %v", got) + t.FailNow() + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("got different than expected:\nA:\t%#v\nB:\t%#v\n\nDiff:\n%s\n\n%s", got, expected, diff.ObjectDiff(expected, got), diff.ObjectGoPrintSideBySide(expected, got)) + } + } +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/doc.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/doc.go new file mode 100644 index 0000000000000..65269e693b22a --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission + +// Package v1 is the v1 version of the API.
+package v1 diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/register.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/register.go new file mode 100644 index 0000000000000..aecb8a6eec279 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/register.go @@ -0,0 +1,27 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission" +) + +func (obj *IngressAdmissionConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +var GroupVersion = schema.GroupVersion{Group: "route.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + ingressadmission.Install, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &IngressAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/swagger_doc.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/swagger_doc.go new file mode 100644 index 0000000000000..27266bc8b3f6d --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/swagger_doc.go @@ -0,0 +1,15 @@ +package v1 + +// This file contains methods that can be used by the go-restful package to generate Swagger +// documentation for the object types found in 'types.go'. This file is automatically generated +// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift. +// ==== DO NOT EDIT THIS FILE MANUALLY ==== + +var map_IngressAdmissionConfig = map[string]string{ + "": "IngressAdmissionConfig is the configuration for the ingress controller limiter plugin. It changes the behavior of ingress objects to behave better with OpenShift routes and routers. *NOTE* This has security implications in the router when handling ingress objects.", + "allowHostnameChanges": "AllowHostnameChanges: when false or unset, OpenShift does not allow changing or adding hostnames to ingress objects. If set to true then hostnames can be added or modified, which has security implications in the router.", +} + +func (IngressAdmissionConfig) SwaggerDoc() map[string]string { + return map_IngressAdmissionConfig +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/types.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/types.go new file mode 100644 index 0000000000000..a770d0539f449 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/types.go @@ -0,0 +1,22 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IngressAdmissionConfig is the configuration for the ingress +// controller limiter plugin. It changes the behavior of ingress +// objects to behave better with OpenShift routes and routers. +// *NOTE* This has security implications in the router when handling +// ingress objects. +type IngressAdmissionConfig struct { + metav1.TypeMeta `json:",inline"` + + // AllowHostnameChanges: when false or unset, OpenShift does not + // allow changing or adding hostnames to ingress objects. If set + // to true then hostnames can be added or modified, which has + // security implications in the router.
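+ // + // For example, a minimal admission configuration enabling hostname + // changes might look like this (a sketch; the apiVersion and kind + // follow the group version and type registered in this package): + // + // apiVersion: route.openshift.io/v1 + // kind: IngressAdmissionConfig + // allowHostnameChanges: true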
+ AllowHostnameChanges bool `json:"allowHostnameChanges"` +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/zz_generated.conversion.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/zz_generated.conversion.go new file mode 100644 index 0000000000000..e2b109eb69a2f --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/zz_generated.conversion.go @@ -0,0 +1,68 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + ingressadmission "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*IngressAdmissionConfig)(nil), (*ingressadmission.IngressAdmissionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_IngressAdmissionConfig_To_ingressadmission_IngressAdmissionConfig(a.(*IngressAdmissionConfig), b.(*ingressadmission.IngressAdmissionConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ingressadmission.IngressAdmissionConfig)(nil), (*IngressAdmissionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_ingressadmission_IngressAdmissionConfig_To_v1_IngressAdmissionConfig(a.(*ingressadmission.IngressAdmissionConfig), b.(*IngressAdmissionConfig), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_IngressAdmissionConfig_To_ingressadmission_IngressAdmissionConfig(in *IngressAdmissionConfig, out *ingressadmission.IngressAdmissionConfig, s conversion.Scope) error { + out.AllowHostnameChanges = in.AllowHostnameChanges + return nil +} + +// Convert_v1_IngressAdmissionConfig_To_ingressadmission_IngressAdmissionConfig is an autogenerated conversion function. +func Convert_v1_IngressAdmissionConfig_To_ingressadmission_IngressAdmissionConfig(in *IngressAdmissionConfig, out *ingressadmission.IngressAdmissionConfig, s conversion.Scope) error { + return autoConvert_v1_IngressAdmissionConfig_To_ingressadmission_IngressAdmissionConfig(in, out, s) +} + +func autoConvert_ingressadmission_IngressAdmissionConfig_To_v1_IngressAdmissionConfig(in *ingressadmission.IngressAdmissionConfig, out *IngressAdmissionConfig, s conversion.Scope) error { + out.AllowHostnameChanges = in.AllowHostnameChanges + return nil +} + +// Convert_ingressadmission_IngressAdmissionConfig_To_v1_IngressAdmissionConfig is an autogenerated conversion function. 
+func Convert_ingressadmission_IngressAdmissionConfig_To_v1_IngressAdmissionConfig(in *ingressadmission.IngressAdmissionConfig, out *IngressAdmissionConfig, s conversion.Scope) error { + return autoConvert_ingressadmission_IngressAdmissionConfig_To_v1_IngressAdmissionConfig(in, out, s) +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..0ff013697a53b --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/zz_generated.deepcopy.go @@ -0,0 +1,51 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressAdmissionConfig) DeepCopyInto(out *IngressAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressAdmissionConfig. +func (in *IngressAdmissionConfig) DeepCopy() *IngressAdmissionConfig { + if in == nil { + return nil + } + out := new(IngressAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..c918ac8a2e68a --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/zz_generated.deepcopy.go @@ -0,0 +1,51 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package ingressadmission + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressAdmissionConfig) DeepCopyInto(out *IngressAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressAdmissionConfig. +func (in *IngressAdmissionConfig) DeepCopy() *IngressAdmissionConfig { + if in == nil { + return nil + } + out := new(IngressAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/route/hostassignment/admission.go b/openshift-kube-apiserver/admission/route/hostassignment/admission.go new file mode 100644 index 0000000000000..19327fca63e4a --- /dev/null +++ b/openshift-kube-apiserver/admission/route/hostassignment/admission.go @@ -0,0 +1,162 @@ +package hostassignment + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/kubernetes" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + + routev1 "github.com/openshift/api/route/v1" + "github.com/openshift/library-go/pkg/config/helpers" + routecommon "github.com/openshift/library-go/pkg/route" + "github.com/openshift/library-go/pkg/route/hostassignment" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/route" + hostassignmentapi "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/hostassignment" + hostassignmentv1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/hostassignment/v1" +) + +const PluginName = "route.openshift.io/RouteHostAssignment" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + return newHostAssignment(pluginConfig) + }) +} + +type hostAssignment struct { + *admission.Handler + + hostnameGenerator hostassignment.HostnameGenerator + sarClient authorizationv1.SubjectAccessReviewInterface + validationOpts routecommon.RouteValidationOptions +} + +func readConfig(reader io.Reader) (*hostassignmentapi.HostAssignmentAdmissionConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, hostassignmentapi.Install, hostassignmentv1.Install) + if err != nil { + return nil, err + } + if obj == nil { + scheme := runtime.NewScheme() + hostassignmentapi.Install(scheme) + hostassignmentv1.Install(scheme) + external := &hostassignmentv1.HostAssignmentAdmissionConfig{} + scheme.Default(external) + internal := &hostassignmentapi.HostAssignmentAdmissionConfig{} + if err := scheme.Convert(external, internal, nil); err != nil { + return nil, fmt.Errorf("failed to produce default config: %w", err) + } + obj = internal + } + config, ok := obj.(*hostassignmentapi.HostAssignmentAdmissionConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + return config, nil +} + +func newHostAssignment(config *hostassignmentapi.HostAssignmentAdmissionConfig) (*hostAssignment, error) { + hostnameGenerator, err := hostassignment.NewSimpleAllocationPlugin(config.Domain) + if err != nil { + return nil, 
fmt.Errorf("configuration failed: %w", err) + } + return &hostAssignment{ + Handler: admission.NewHandler(admission.Create, admission.Update), + hostnameGenerator: hostnameGenerator, + }, nil +} + +func toRoute(uncastObj runtime.Object) (*routev1.Route, runtime.Unstructured, field.ErrorList) { + u, ok := uncastObj.(runtime.Unstructured) + if !ok { + return nil, nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Route"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{routev1.GroupVersion.String()}), + } + } + + var out routev1.Route + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.UnstructuredContent(), &out); err != nil { + return nil, nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Route"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{routev1.GroupVersion.String()}), + } + } + + return &out, u, nil +} + +var _ admission.MutationInterface = &hostAssignment{} + +func (a *hostAssignment) Admit(ctx context.Context, attributes admission.Attributes, o admission.ObjectInterfaces) error { + if attributes.GetResource().GroupResource() != (schema.GroupResource{Group: "route.openshift.io", Resource: "routes"}) { + return nil + } + // if a subresource is specified, skip it + if len(attributes.GetSubresource()) > 0 { + return nil + } + + switch attributes.GetOperation() { + case admission.Create: + r, u, errs := toRoute(attributes.GetObject()) + if len(errs) > 0 { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) + } + errs = hostassignment.AllocateHost(ctx, r, a.sarClient, a.hostnameGenerator, a.validationOpts) + if len(errs) > 0 { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) + } + content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(r) + if err != nil { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), field.ErrorList{ + field.InternalError(field.NewPath(""), err), + }) + } + u.SetUnstructuredContent(content) + case admission.Update: + r, _, errs := toRoute(attributes.GetObject()) + if len(errs) > 0 { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) + } + old, _, errs := toRoute(attributes.GetOldObject()) + if len(errs) > 0 { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) + } + + errs = hostassignment.ValidateHostUpdate(ctx, r, old, a.sarClient, a.validationOpts) + if len(errs) > 0 { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) + } + default: + return admission.NewForbidden(attributes, fmt.Errorf("unhandled operation: %v", attributes.GetOperation())) + } + + return nil +} + +var _ initializer.WantsExternalKubeClientSet = &hostAssignment{} + +func (a *hostAssignment) SetExternalKubeClientSet(clientset kubernetes.Interface) { + a.sarClient = clientset.AuthorizationV1().SubjectAccessReviews() + a.validationOpts = route.NewRouteValidationOpts().GetValidationOptions() +} + +func (a *hostAssignment) ValidateInitialization() error { + if a.sarClient == nil { + return fmt.Errorf("missing SubjectAccessReview client") + } + return nil +} diff --git a/openshift-kube-apiserver/admission/route/ingress_admission.go b/openshift-kube-apiserver/admission/route/ingress_admission.go new file mode 100644 index 0000000000000..f59104fe51a08 --- 
/dev/null +++ b/openshift-kube-apiserver/admission/route/ingress_admission.go @@ -0,0 +1,162 @@ +// This plugin supplements upstream Ingress admission validation. +// It takes care of current OpenShift-specific constraints on Ingress resources. +package admission + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + kextensions "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/networking" + + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1" +) + +const ( + IngressAdmission = "route.openshift.io/IngressAdmission" +) + +func Register(plugins *admission.Plugins) { + plugins.Register(IngressAdmission, + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + return NewIngressAdmission(pluginConfig), nil + }) +} + +type ingressAdmission struct { + *admission.Handler + config *ingressadmission.IngressAdmissionConfig + authorizer authorizer.Authorizer +} + +var _ = initializer.WantsAuthorizer(&ingressAdmission{}) +var _ = admission.ValidationInterface(&ingressAdmission{}) + +func NewIngressAdmission(config *ingressadmission.IngressAdmissionConfig) *ingressAdmission { + return &ingressAdmission{ + Handler: admission.NewHandler(admission.Create, admission.Update), + config: config, + } +} + +func readConfig(reader io.Reader) (*ingressadmission.IngressAdmissionConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, ingressadmission.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*ingressadmission.IngressAdmissionConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + // No further validation needed since the config is a single boolean + return config, nil +} + +func (r *ingressAdmission) SetAuthorizer(a authorizer.Authorizer) { + r.authorizer = a +} + +func (r *ingressAdmission) ValidateInitialization() error { + if r.authorizer == nil { + return fmt.Errorf("%s needs an OpenShift Authorizer", IngressAdmission) + } + return nil +} + +func (r *ingressAdmission) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) error { + if a.GetResource().GroupResource() == kextensions.Resource("ingresses") { + switch a.GetOperation() { + case admission.Create: + if ingress, ok := a.GetObject().(*networking.Ingress); ok { + // if any rules have a host, check whether the user has permission to set them + for i, rule := range ingress.Spec.Rules { + if len(rule.Host) > 0 { + attr := authorizer.AttributesRecord{ + User: a.GetUserInfo(), + Verb: "create", + Namespace: a.GetNamespace(), + Resource: "routes", + Subresource: "custom-host", + APIGroup: "route.openshift.io", + ResourceRequest: true, + } + kind := schema.GroupKind{Group: a.GetResource().Group, Kind: a.GetResource().Resource} + authorized, _, err := r.authorizer.Authorize(ctx, attr) + if err != nil { + return errors.NewInvalid(kind, ingress.Name, field.ErrorList{field.InternalError(field.NewPath("spec", "rules").Index(i), err)}) + } + if authorized !=
authorizer.DecisionAllow { + return errors.NewInvalid(kind, ingress.Name, field.ErrorList{field.Forbidden(field.NewPath("spec", "rules").Index(i), "you do not have permission to set host fields in ingress rules")}) + } + break + } + } + } + case admission.Update: + if r.config == nil || !r.config.AllowHostnameChanges { + oldIngress, ok := a.GetOldObject().(*networking.Ingress) + if !ok { + return nil + } + newIngress, ok := a.GetObject().(*networking.Ingress) + if !ok { + return nil + } + if !haveHostnamesChanged(oldIngress, newIngress) { + attr := authorizer.AttributesRecord{ + User: a.GetUserInfo(), + Verb: "update", + Namespace: a.GetNamespace(), + Name: a.GetName(), + Resource: "routes", + Subresource: "custom-host", + APIGroup: "route.openshift.io", + ResourceRequest: true, + } + kind := schema.GroupKind{Group: a.GetResource().Group, Kind: a.GetResource().Resource} + authorized, _, err := r.authorizer.Authorize(ctx, attr) + if err != nil { + return errors.NewInvalid(kind, newIngress.Name, field.ErrorList{field.InternalError(field.NewPath("spec", "rules"), err)}) + } + if authorized == authorizer.DecisionAllow { + return nil + } + return fmt.Errorf("cannot change hostname") + } + } + } + } + return nil +} + +// haveHostnamesChanged returns true when every hostname in newIngress was +// already present in oldIngress, i.e. when no new hostname is being added; +// note that the return value is inverted relative to what the name suggests. +func haveHostnamesChanged(oldIngress, newIngress *networking.Ingress) bool { + hostnameSet := sets.NewString() + for _, element := range oldIngress.Spec.Rules { + hostnameSet.Insert(element.Host) + } + + for _, element := range newIngress.Spec.Rules { + if present := hostnameSet.Has(element.Host); !present { + return false + } + } + + return true +} diff --git a/openshift-kube-apiserver/admission/route/ingress_admission_test.go b/openshift-kube-apiserver/admission/route/ingress_admission_test.go new file mode 100644 index 0000000000000..b1013b8346a30 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/ingress_admission_test.go @@ -0,0 +1,171 @@ +package admission + +import ( + "context" + "testing" + + "k8s.io/kubernetes/pkg/apis/networking" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + kextensions "k8s.io/kubernetes/pkg/apis/extensions" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission" +) + +type fakeAuthorizer struct { + allow authorizer.Decision + err error +} + +func (a *fakeAuthorizer) Authorize(context.Context, authorizer.Attributes) (authorizer.Decision, string, error) { + return a.allow, "", a.err +} + +func TestAdmission(t *testing.T) { + var newIngress *networking.Ingress + var oldIngress *networking.Ingress + + tests := []struct { + config *ingressadmission.IngressAdmissionConfig + testName string + oldHost, newHost string + op admission.Operation + admit bool + allow authorizer.Decision + }{ + { + admit: true, + config: emptyConfig(), + op: admission.Create, + testName: "No errors on create", + }, + { + admit: true, + config: emptyConfig(), + op: admission.Update, + newHost: "foo.com", + oldHost: "foo.com", + testName: "keeping the host the same should pass", + }, + { + admit: true, + config: emptyConfig(), + op: admission.Update, + oldHost: "foo.com", + testName: "deleting a hostname should pass", + }, + { + admit: false, + config: emptyConfig(), + op: admission.Update, + newHost: "foo.com", + oldHost: "bar.com", + testName: "changing hostname should fail", + }, + { + admit: true, + allow: authorizer.DecisionAllow, + config: emptyConfig(), + op: admission.Update, + newHost: "foo.com", + oldHost: "bar.com", + testName:
"changing hostname should succeed if the user has permission", + }, + { + admit: false, + config: nil, + op: admission.Update, + newHost: "foo.com", + oldHost: "bar.com", + testName: "unconfigured plugin should still fail", + }, + { + admit: true, + config: testConfigUpdateAllow(), + op: admission.Update, + newHost: "foo.com", + oldHost: "bar.com", + testName: "Upstream Hostname updates enabled", + }, + { + admit: true, + config: testConfigUpdateAllow(), + op: admission.Update, + newHost: "foo.com", + testName: "add new hostname with upstream rules", + }, + { + admit: false, + allow: authorizer.DecisionNoOpinion, + config: emptyConfig(), + op: admission.Create, + newHost: "foo.com", + testName: "setting the host should require permission", + }, + { + admit: true, + allow: authorizer.DecisionAllow, + config: emptyConfig(), + op: admission.Create, + newHost: "foo.com", + testName: "setting the host should pass if user has permission", + }, + } + for _, test := range tests { + if len(test.newHost) > 0 { + newIngress = &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: test.newHost, + }, + }, + }, + } + } else { + //Used to test deleting a hostname + newIngress = &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + } + } + handler := NewIngressAdmission(test.config) + handler.SetAuthorizer(&fakeAuthorizer{allow: test.allow}) + + if len(test.oldHost) > 0 { + //Provides the previous state of an ingress object + oldIngress = &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: test.oldHost, + }, + }, + }, + } + } else { + oldIngress = nil + } + + err := handler.Validate(context.TODO(), admission.NewAttributesRecord(newIngress, oldIngress, kextensions.Kind("ingresses").WithVersion("Version"), "namespace", newIngress.ObjectMeta.Name, kextensions.Resource("ingresses").WithVersion("version"), "", test.op, nil, false, nil), nil) + if test.admit && err != nil { + t.Errorf("%s: expected no error but got: %s", test.testName, err) + } else if !test.admit && err == nil { + t.Errorf("%s: expected an error", test.testName) + } + } + +} + +func emptyConfig() *ingressadmission.IngressAdmissionConfig { + return &ingressadmission.IngressAdmissionConfig{} +} + +func testConfigUpdateAllow() *ingressadmission.IngressAdmissionConfig { + return &ingressadmission.IngressAdmissionConfig{ + AllowHostnameChanges: true, + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/doc.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/doc.go new file mode 100644 index 0000000000000..ae163f472d40a --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package api is the internal version of the API. 
+package podnodeconstraints diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/register.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/register.go new file mode 100644 index 0000000000000..5b8add00bb815 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/register.go @@ -0,0 +1,33 @@ +package podnodeconstraints + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupVersion is the group version used to register these objects +var GroupVersion = schema.GroupVersion{Group: "scheduling.openshift.io", Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return GroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return GroupVersion.WithResource(resource).GroupResource() +} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &PodNodeConstraintsConfig{}, + ) + return nil +} + +func (obj *PodNodeConstraintsConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/types.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/types.go new file mode 100644 index 0000000000000..27cebad199ed0 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/types.go @@ -0,0 +1,19 @@ +package podnodeconstraints + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodNodeConstraintsConfig is the configuration for the pod node name +// and node selector constraint plug-in.
For accounts, serviceaccounts, +// and groups which lack the "pods/binding" permission, Loading this +// plugin will prevent setting NodeName on pod specs and will prevent +// setting NodeSelectors whose labels appear in the blacklist field +// "NodeSelectorLabelBlacklist" +type PodNodeConstraintsConfig struct { + metav1.TypeMeta + // NodeSelectorLabelBlacklist specifies a list of labels which cannot be set by entities without the "pods/binding" permission + NodeSelectorLabelBlacklist []string +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults.go new file mode 100644 index 0000000000000..54d718cfc91af --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults.go @@ -0,0 +1,19 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func SetDefaults_PodNodeConstraintsConfig(obj *PodNodeConstraintsConfig) { + if obj.NodeSelectorLabelBlacklist == nil { + obj.NodeSelectorLabelBlacklist = []string{ + corev1.LabelHostname, + } + } +} + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&PodNodeConstraintsConfig{}, func(obj interface{}) { SetDefaults_PodNodeConstraintsConfig(obj.(*PodNodeConstraintsConfig)) }) + return nil +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults_test.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults_test.go new file mode 100644 index 0000000000000..513084ad95122 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults_test.go @@ -0,0 +1,59 @@ +package v1 + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/api/apitesting" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" +) + +func roundTrip(t *testing.T, obj runtime.Object) runtime.Object { + scheme, codecs := apitesting.SchemeForOrDie(Install) + data, err := runtime.Encode(codecs.LegacyCodec(GroupVersion), obj) + if err != nil { + t.Errorf("%v\n %#v", err, obj) + return nil + } + obj2, err := runtime.Decode(codecs.UniversalDecoder(), data) + if err != nil { + t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj) + return nil + } + obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object) + err = scheme.Convert(obj2, obj3, nil) + if err != nil { + t.Errorf("%v\nSource: %#v", err, obj2) + return nil + } + return obj3 +} + +func TestDefaults(t *testing.T) { + tests := []struct { + original *PodNodeConstraintsConfig + expected *PodNodeConstraintsConfig + }{ + { + original: &PodNodeConstraintsConfig{}, + expected: &PodNodeConstraintsConfig{ + NodeSelectorLabelBlacklist: []string{"kubernetes.io/hostname"}, + }, + }, + } + for i, test := range tests { + t.Logf("test %d", i) + original := test.original + expected := test.expected + obj2 := roundTrip(t, runtime.Object(original)) + got, ok := obj2.(*PodNodeConstraintsConfig) + if !ok { + t.Errorf("unexpected object: %v", got) + t.FailNow() + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("got different than expected:\nA:\t%#v\nB:\t%#v\n\nDiff:\n%s\n\n%s", got, expected, diff.ObjectDiff(expected, got), diff.ObjectGoPrintSideBySide(expected, got)) + } + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/doc.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/doc.go new file 
mode 100644 index 0000000000000..602ddf4d19a41 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/register.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/register.go new file mode 100644 index 0000000000000..b836b750fdb3f --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/register.go @@ -0,0 +1,28 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints" +) + +func (obj *PodNodeConstraintsConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +var GroupVersion = schema.GroupVersion{Group: "scheduling.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + podnodeconstraints.Install, + + addDefaultingFuncs, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &PodNodeConstraintsConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/swagger_doc.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/swagger_doc.go new file mode 100644 index 0000000000000..95e3d2220841c --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/swagger_doc.go @@ -0,0 +1,15 @@ +package v1 + +// This file contains methods that can be used by the go-restful package to generate Swagger +// documentation for the object types found in 'types.go'. This file is automatically generated +// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift. +// ==== DO NOT EDIT THIS FILE MANUALLY ==== + +var map_PodNodeConstraintsConfig = map[string]string{ + "": "PodNodeConstraintsConfig is the configuration for the pod node name and node selector constraint plug-in. For accounts, serviceaccounts and groups which lack the \"pods/binding\" permission, loading this plugin will prevent setting NodeName on pod specs and will prevent setting NodeSelectors whose labels appear in the blacklist field \"NodeSelectorLabelBlacklist\"", + "nodeSelectorLabelBlacklist": "NodeSelectorLabelBlacklist specifies a list of labels which cannot be set by entities without the \"pods/binding\" permission", +} + +func (PodNodeConstraintsConfig) SwaggerDoc() map[string]string { + return map_PodNodeConstraintsConfig +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/types.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/types.go new file mode 100644 index 0000000000000..3ffd5acdb8952 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/types.go @@ -0,0 +1,20 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodNodeConstraintsConfig is the configuration for the pod node name +// and node selector constraint plug-in.
For accounts, serviceaccounts +// and groups which lack the "pods/binding" permission, Loading this +// plugin will prevent setting NodeName on pod specs and will prevent +// setting NodeSelectors whose labels appear in the blacklist field +// "NodeSelectorLabelBlacklist" +type PodNodeConstraintsConfig struct { + metav1.TypeMeta `json:",inline"` + + // NodeSelectorLabelBlacklist specifies a list of labels which cannot be set by entities without the "pods/binding" permission + NodeSelectorLabelBlacklist []string `json:"nodeSelectorLabelBlacklist" description:"list of labels which cannot be set by entities without the 'pods/binding' permission"` +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/zz_generated.conversion.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/zz_generated.conversion.go new file mode 100644 index 0000000000000..40a730275e95d --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/zz_generated.conversion.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + podnodeconstraints "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*PodNodeConstraintsConfig)(nil), (*podnodeconstraints.PodNodeConstraintsConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodNodeConstraintsConfig_To_podnodeconstraints_PodNodeConstraintsConfig(a.(*PodNodeConstraintsConfig), b.(*podnodeconstraints.PodNodeConstraintsConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*podnodeconstraints.PodNodeConstraintsConfig)(nil), (*PodNodeConstraintsConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_podnodeconstraints_PodNodeConstraintsConfig_To_v1_PodNodeConstraintsConfig(a.(*podnodeconstraints.PodNodeConstraintsConfig), b.(*PodNodeConstraintsConfig), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodNodeConstraintsConfig_To_podnodeconstraints_PodNodeConstraintsConfig(in *PodNodeConstraintsConfig, out *podnodeconstraints.PodNodeConstraintsConfig, s conversion.Scope) error { + out.NodeSelectorLabelBlacklist = *(*[]string)(unsafe.Pointer(&in.NodeSelectorLabelBlacklist)) + return nil +} + +// Convert_v1_PodNodeConstraintsConfig_To_podnodeconstraints_PodNodeConstraintsConfig is an autogenerated conversion function. 
+func Convert_v1_PodNodeConstraintsConfig_To_podnodeconstraints_PodNodeConstraintsConfig(in *PodNodeConstraintsConfig, out *podnodeconstraints.PodNodeConstraintsConfig, s conversion.Scope) error { + return autoConvert_v1_PodNodeConstraintsConfig_To_podnodeconstraints_PodNodeConstraintsConfig(in, out, s) +} + +func autoConvert_podnodeconstraints_PodNodeConstraintsConfig_To_v1_PodNodeConstraintsConfig(in *podnodeconstraints.PodNodeConstraintsConfig, out *PodNodeConstraintsConfig, s conversion.Scope) error { + out.NodeSelectorLabelBlacklist = *(*[]string)(unsafe.Pointer(&in.NodeSelectorLabelBlacklist)) + return nil +} + +// Convert_podnodeconstraints_PodNodeConstraintsConfig_To_v1_PodNodeConstraintsConfig is an autogenerated conversion function. +func Convert_podnodeconstraints_PodNodeConstraintsConfig_To_v1_PodNodeConstraintsConfig(in *podnodeconstraints.PodNodeConstraintsConfig, out *PodNodeConstraintsConfig, s conversion.Scope) error { + return autoConvert_podnodeconstraints_PodNodeConstraintsConfig_To_v1_PodNodeConstraintsConfig(in, out, s) +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..eaf84add54c8b --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/zz_generated.deepcopy.go @@ -0,0 +1,56 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodNodeConstraintsConfig) DeepCopyInto(out *PodNodeConstraintsConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.NodeSelectorLabelBlacklist != nil { + in, out := &in.NodeSelectorLabelBlacklist, &out.NodeSelectorLabelBlacklist + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNodeConstraintsConfig. +func (in *PodNodeConstraintsConfig) DeepCopy() *PodNodeConstraintsConfig { + if in == nil { + return nil + } + out := new(PodNodeConstraintsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodNodeConstraintsConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..a37350369cdba --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/zz_generated.deepcopy.go @@ -0,0 +1,56 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package podnodeconstraints + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodNodeConstraintsConfig) DeepCopyInto(out *PodNodeConstraintsConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.NodeSelectorLabelBlacklist != nil { + in, out := &in.NodeSelectorLabelBlacklist, &out.NodeSelectorLabelBlacklist + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNodeConstraintsConfig. +func (in *PodNodeConstraintsConfig) DeepCopy() *PodNodeConstraintsConfig { + if in == nil { + return nil + } + out := new(PodNodeConstraintsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodNodeConstraintsConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/scheduler/nodeenv/admission.go b/openshift-kube-apiserver/admission/scheduler/nodeenv/admission.go new file mode 100644 index 0000000000000..b52b8242550cc --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/nodeenv/admission.go @@ -0,0 +1,174 @@ +package nodeenv + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + corev1listers "k8s.io/client-go/listers/core/v1" + coreapi "k8s.io/kubernetes/pkg/apis/core" + + projectv1 "github.com/openshift/api/project/v1" + "github.com/openshift/apiserver-library-go/pkg/labelselector" +) + +func Register(plugins *admission.Plugins) { + plugins.Register("scheduling.openshift.io/OriginPodNodeEnvironment", + func(config io.Reader) (admission.Interface, error) { + return NewPodNodeEnvironment() + }) +} + +const ( + timeToWaitForCacheSync = 10 * time.Second + kubeProjectNodeSelector = "scheduler.alpha.kubernetes.io/node-selector" +) + +// podNodeEnvironment is an implementation of admission.MutationInterface. 
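+// It also implements admission.ValidationInterface: Validate runs the same +// checks as Admit but without mutating the pod. As a sketch of the intended +// behavior: given a namespace annotated with projectv1.ProjectNodeSelector +// (e.g. "region=west"), Admit merges {"region": "west"} into the pod's +// spec.nodeSelector, while Validate rejects a pod whose selector conflicts +// with, or does not already include, the project selector.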
+type podNodeEnvironment struct { + *admission.Handler + nsLister corev1listers.NamespaceLister + nsListerSynced func() bool + nodeLister corev1listers.NodeLister + nodeListerSynced func() bool + // TODO this should become a piece of config passed to the admission plugin + defaultNodeSelector string +} + +var _ = initializer.WantsExternalKubeInformerFactory(&podNodeEnvironment{}) +var _ = WantsDefaultNodeSelector(&podNodeEnvironment{}) +var _ = admission.ValidationInterface(&podNodeEnvironment{}) +var _ = admission.MutationInterface(&podNodeEnvironment{}) + +// admit enforces that a pod's node label selector does not conflict with its +// project's node label selector and, when spec.nodeName is set, that the named +// node matches the project selector; when mutation is allowed, the project +// selector is merged into the pod's selector. +func (p *podNodeEnvironment) admit(ctx context.Context, a admission.Attributes, mutationAllowed bool) (err error) { + resource := a.GetResource().GroupResource() + if resource != corev1.Resource("pods") { + return nil + } + if a.GetSubresource() != "" { + // only run the checks below on pods proper and not subresources + return nil + } + + obj := a.GetObject() + pod, ok := obj.(*coreapi.Pod) + if !ok { + return nil + } + + name := pod.Name + + if !p.waitForSyncedStore(time.After(timeToWaitForCacheSync)) { + return admission.NewForbidden(a, errors.New("scheduling.openshift.io/OriginPodNodeEnvironment: caches not synchronized")) + } + namespace, err := p.nsLister.Get(a.GetNamespace()) + if err != nil { + return apierrors.NewForbidden(resource, name, err) + } + + // If scheduler.alpha.kubernetes.io/node-selector is set on the namespace, + // do not process the pod further. + if _, ok := namespace.ObjectMeta.Annotations[kubeProjectNodeSelector]; ok { + return nil + } + + selector := p.defaultNodeSelector + if projectNodeSelector, ok := namespace.ObjectMeta.Annotations[projectv1.ProjectNodeSelector]; ok { + selector = projectNodeSelector + } + // we might consider allowing advanced selector syntax in the future and using labels.Parse here instead + projectNodeSelector, err := labelselector.Parse(selector) + if err != nil { + return err + } + + if labelselector.Conflicts(projectNodeSelector, pod.Spec.NodeSelector) { + return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node label selector conflicts with its project node label selector")) + } + + if !mutationAllowed && len(labelselector.Merge(projectNodeSelector, pod.Spec.NodeSelector)) != len(pod.Spec.NodeSelector) { + // no conflict, different size => pod.Spec.NodeSelector does not contain projectNodeSelector + return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node label selector does not extend project node label selector")) + } + + if len(pod.Spec.NodeName) > 0 && len(projectNodeSelector) > 0 { + node, err := p.nodeLister.Get(pod.Spec.NodeName) + if err != nil { + return apierrors.NewForbidden(resource, name, fmt.Errorf("cannot validate project node label selector: %v", err)) + } + projectNodeSelectorAdvanced, err := labels.Parse(selector) + if err != nil { + return err + } + if !projectNodeSelectorAdvanced.Matches(labels.Set(node.Labels)) { + return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node name conflicts with project node label selector")) + } + } + + // modify pod node selector = project node selector + current pod node selector + pod.Spec.NodeSelector = labelselector.Merge(projectNodeSelector, pod.Spec.NodeSelector) + + return nil +} + +func (p *podNodeEnvironment) Admit(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) { + return p.admit(ctx, a, true) +} + +func (p *podNodeEnvironment) Validate(ctx
context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) { + return p.admit(ctx, a, false) +} + +func (p *podNodeEnvironment) SetDefaultNodeSelector(in string) { + p.defaultNodeSelector = in +} + +func (p *podNodeEnvironment) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + p.nsLister = kubeInformers.Core().V1().Namespaces().Lister() + p.nsListerSynced = kubeInformers.Core().V1().Namespaces().Informer().HasSynced + p.nodeLister = kubeInformers.Core().V1().Nodes().Lister() + p.nodeListerSynced = kubeInformers.Core().V1().Nodes().Informer().HasSynced +} + +func (p *podNodeEnvironment) waitForSyncedStore(timeout <-chan time.Time) bool { + for !p.nsListerSynced() || !p.nodeListerSynced() { + select { + case <-time.After(100 * time.Millisecond): + case <-timeout: + return p.nsListerSynced() && p.nodeListerSynced() + } + } + + return true +} + +func (p *podNodeEnvironment) ValidateInitialization() error { + if p.nsLister == nil { + return fmt.Errorf("project node environment plugin needs a namespace lister") + } + if p.nsListerSynced == nil { + return fmt.Errorf("project node environment plugin needs a namespace lister synced") + } + if p.nodeLister == nil { + return fmt.Errorf("project node environment plugin needs a node lister") + } + if p.nodeListerSynced == nil { + return fmt.Errorf("project node environment plugin needs a node lister synced") + } + return nil +} + +func NewPodNodeEnvironment() (admission.Interface, error) { + return &podNodeEnvironment{ + Handler: admission.NewHandler(admission.Create), + }, nil +} diff --git a/openshift-kube-apiserver/admission/scheduler/nodeenv/admission_test.go b/openshift-kube-apiserver/admission/scheduler/nodeenv/admission_test.go new file mode 100644 index 0000000000000..b3d058ba34a2f --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/nodeenv/admission_test.go @@ -0,0 +1,211 @@ +package nodeenv + +import ( + "context" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/admission" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + kapi "k8s.io/kubernetes/pkg/apis/core" + + projectv1 "github.com/openshift/api/project/v1" + "github.com/openshift/apiserver-library-go/pkg/labelselector" +) + +// TestPodAdmission verifies various scenarios involving pod/project/global node label selectors +func TestPodAdmission(t *testing.T) { + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testProject", + Namespace: "", + }, + } + + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "worker-1", + Namespace: "", + Labels: map[string]string{ + "worker": "true", + }, + }, + } + + handler := &podNodeEnvironment{} + pod := &kapi.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "testPod"}, + } + + tests := []struct { + defaultNodeSelector string + projectNodeSelector string + podNodeSelector map[string]string + podNodeName string + mergedNodeSelector map[string]string + ignoreProjectNodeSelector bool + admit bool + testName string + }{ + { + defaultNodeSelector: "", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{}, + ignoreProjectNodeSelector: true, + admit: true, + testName: "No node selectors", + }, + { + defaultNodeSelector: "infra = false", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{"infra": "false"}, + ignoreProjectNodeSelector: true, + admit: true, + testName: "Default node selector and no 
conflicts", + }, + { + defaultNodeSelector: "", + projectNodeSelector: "infra = false", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{"infra": "false"}, + admit: true, + testName: "Project node selector and no conflicts", + }, + { + defaultNodeSelector: "infra = false", + projectNodeSelector: "", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{}, + admit: true, + testName: "Empty project node selector and no conflicts", + }, + { + defaultNodeSelector: "infra = false", + projectNodeSelector: "infra=true", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{"infra": "true"}, + admit: true, + testName: "Default and project node selector, no conflicts", + }, + { + defaultNodeSelector: "infra = false", + projectNodeSelector: "infra=true", + podNodeSelector: map[string]string{"env": "test"}, + mergedNodeSelector: map[string]string{"infra": "true", "env": "test"}, + admit: true, + testName: "Project and pod node selector, no conflicts", + }, + { + defaultNodeSelector: "env = test", + projectNodeSelector: "infra=true", + podNodeSelector: map[string]string{"infra": "false"}, + mergedNodeSelector: map[string]string{"infra": "false"}, + admit: false, + testName: "Conflicting pod and project node selector, one label", + }, + { + defaultNodeSelector: "env=dev", + projectNodeSelector: "infra=false, env = test", + podNodeSelector: map[string]string{"env": "dev", "color": "blue"}, + mergedNodeSelector: map[string]string{"env": "dev", "color": "blue"}, + admit: false, + testName: "Conflicting pod and project node selector, multiple labels", + }, + { + defaultNodeSelector: "", + projectNodeSelector: "worker=true", + podNodeName: "worker-1", + podNodeSelector: nil, + mergedNodeSelector: map[string]string{"worker": "true"}, + admit: true, + testName: "node referenced in pod.nodeName does not conflict with project node selector", + }, + { + defaultNodeSelector: "", + projectNodeSelector: "", + podNodeName: "worker-1", + podNodeSelector: map[string]string{"worker": "false"}, + mergedNodeSelector: map[string]string{"worker": "false"}, + admit: true, + // default to kube behavior: let this fail by kubelet + testName: "node referenced in pod spec.nodeName can conflict with its own node selector when no project node selector is specified", + }, + { + defaultNodeSelector: "worker = true", + projectNodeSelector: "worker=false", + podNodeName: "worker-1", + podNodeSelector: nil, + mergedNodeSelector: nil, + admit: false, + testName: "node referenced in pod spec.nodeName conflicts with project node selector", + }, + { + defaultNodeSelector: "", + projectNodeSelector: "worker=true", + podNodeName: "worker-2", + podNodeSelector: nil, + mergedNodeSelector: nil, + admit: false, + testName: "missing node referenced in pod spec.nodeName does not admit", + }, + } + for _, test := range tests { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + indexer.Add(namespace) + indexer.Add(node) + handler.nsLister = corev1listers.NewNamespaceLister(indexer) + handler.nsListerSynced = func() bool { return true } + handler.nodeLister = corev1listers.NewNodeLister(indexer) + handler.nodeListerSynced = func() bool { return true } + handler.defaultNodeSelector = test.defaultNodeSelector + + if !test.ignoreProjectNodeSelector { + namespace.ObjectMeta.Annotations = map[string]string{projectv1.ProjectNodeSelector: test.projectNodeSelector} + } + pod.Spec = kapi.PodSpec{NodeSelector: test.podNodeSelector, NodeName: 
test.podNodeName} + + attrs := admission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), "testProject", namespace.ObjectMeta.Name, kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, nil) + err := handler.Admit(context.TODO(), attrs, nil) + if test.admit && err != nil { + t.Errorf("Test: %s, expected no error but got: %s", test.testName, err) + } else if !test.admit && err == nil { + t.Errorf("Test: %s, expected an error", test.testName) + } else if err == nil { + if err := handler.Validate(context.TODO(), attrs, nil); err != nil { + t.Errorf("Test: %s, unexpected Validate error after Admit succeeded: %v", test.testName, err) + } + } + + if !labelselector.Equals(test.mergedNodeSelector, pod.Spec.NodeSelector) { + t.Errorf("Test: %s, expected: %s but got: %s", test.testName, test.mergedNodeSelector, pod.Spec.NodeSelector) + } else if len(test.projectNodeSelector) > 0 { + firstProjectKey := strings.TrimSpace(strings.Split(test.projectNodeSelector, "=")[0]) + delete(pod.Spec.NodeSelector, firstProjectKey) + if err := handler.Validate(context.TODO(), attrs, nil); err == nil { + t.Errorf("Test: %s, expected Validate error after removing project key %q", test.testName, firstProjectKey) + } + } + } +} + +func TestHandles(t *testing.T) { + for op, shouldHandle := range map[admission.Operation]bool{ + admission.Create: true, + admission.Update: false, + admission.Connect: false, + admission.Delete: false, + } { + nodeEnvironment, err := NewPodNodeEnvironment() + if err != nil { + t.Errorf("%v: error getting node environment: %v", op, err) + continue + } + + if e, a := shouldHandle, nodeEnvironment.Handles(op); e != a { + t.Errorf("%v: shouldHandle=%t, handles=%t", op, e, a) + } + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/nodeenv/intializers.go b/openshift-kube-apiserver/admission/scheduler/nodeenv/intializers.go new file mode 100644 index 0000000000000..534905cb06120 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/nodeenv/intializers.go @@ -0,0 +1,28 @@ +package nodeenv + +import ( + "k8s.io/apiserver/pkg/admission" +) + +func NewInitializer(defaultNodeSelector string) admission.PluginInitializer { + return &localInitializer{ + defaultNodeSelector: defaultNodeSelector, + } +} + +type WantsDefaultNodeSelector interface { + SetDefaultNodeSelector(string) + admission.InitializationValidator +} + +type localInitializer struct { + defaultNodeSelector string +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsDefaultNodeSelector); ok { + wants.SetDefaultNodeSelector(i.defaultNodeSelector) + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission.go b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission.go new file mode 100644 index 0000000000000..05ef26277fcac --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission.go @@ -0,0 +1,205 @@ +package podnodeconstraints + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/klog/v2" + coreapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/auth/nodeidentifier" + + 
"github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1" +) + +const PluginName = "scheduling.openshift.io/PodNodeConstraints" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", PluginName) + return nil, nil + } + return NewPodNodeConstraints(pluginConfig, nodeidentifier.NewDefaultNodeIdentifier()), nil + }) +} + +// NewPodNodeConstraints creates a new admission plugin to prevent objects that contain pod templates +// from containing node bindings by name or selector based on role permissions. +func NewPodNodeConstraints(config *podnodeconstraints.PodNodeConstraintsConfig, nodeIdentifier nodeidentifier.NodeIdentifier) admission.Interface { + plugin := podNodeConstraints{ + config: config, + Handler: admission.NewHandler(admission.Create, admission.Update), + nodeIdentifier: nodeIdentifier, + } + if config != nil { + plugin.selectorLabelBlacklist = sets.NewString(config.NodeSelectorLabelBlacklist...) + } + + return &plugin +} + +type podNodeConstraints struct { + *admission.Handler + selectorLabelBlacklist sets.String + config *podnodeconstraints.PodNodeConstraintsConfig + authorizer authorizer.Authorizer + nodeIdentifier nodeidentifier.NodeIdentifier +} + +var _ = initializer.WantsAuthorizer(&podNodeConstraints{}) +var _ = admission.ValidationInterface(&podNodeConstraints{}) + +func shouldCheckResource(resource schema.GroupResource, kind schema.GroupKind) (bool, error) { + expectedKind, shouldCheck := resourcesToCheck[resource] + if !shouldCheck { + return false, nil + } + if expectedKind != kind { + return false, fmt.Errorf("Unexpected resource kind %v for resource %v", &kind, &resource) + } + return true, nil +} + +// resourcesToCheck is a map of resources and corresponding kinds of things that we want handled in this plugin +var resourcesToCheck = map[schema.GroupResource]schema.GroupKind{ + coreapi.Resource("pods"): coreapi.Kind("Pod"), +} + +func readConfig(reader io.Reader) (*podnodeconstraints.PodNodeConstraintsConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, podnodeconstraints.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*podnodeconstraints.PodNodeConstraintsConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + // No validation needed since config is just list of strings + return config, nil +} + +func (o *podNodeConstraints) Validate(ctx context.Context, attr admission.Attributes, _ admission.ObjectInterfaces) error { + switch { + case o.config == nil, + attr.GetSubresource() != "": + return nil + } + shouldCheck, err := shouldCheckResource(attr.GetResource().GroupResource(), attr.GetKind().GroupKind()) + if err != nil { + return err + } + if !shouldCheck { + return nil + } + // Only check Create operation on pods + if attr.GetResource().GroupResource() == coreapi.Resource("pods") && attr.GetOperation() != admission.Create { + return nil + } + + return o.validatePodSpec(ctx, attr, attr.GetObject().(*coreapi.Pod).Spec) +} + +// validate PodSpec if NodeName or NodeSelector are specified +func (o 
*podNodeConstraints) validatePodSpec(ctx context.Context, attr admission.Attributes, ps coreapi.PodSpec) error { + // a node creating a mirror pod that targets itself is allowed + // see the NodeRestriction plugin for further details + if o.isNodeSelfTargetWithMirrorPod(attr, ps.NodeName) { + return nil + } + + matchingLabels := []string{} + // nodeSelector blacklist filter + for nodeSelectorLabel := range ps.NodeSelector { + if o.selectorLabelBlacklist.Has(nodeSelectorLabel) { + matchingLabels = append(matchingLabels, nodeSelectorLabel) + } + } + // nodeName constraint + if len(ps.NodeName) > 0 || len(matchingLabels) > 0 { + allow, err := o.checkPodsBindAccess(ctx, attr) + if err != nil { + return err + } + if !allow { + switch { + case len(ps.NodeName) > 0 && len(matchingLabels) == 0: + return admission.NewForbidden(attr, fmt.Errorf("node selection by nodeName is prohibited by policy for your role")) + case len(ps.NodeName) == 0 && len(matchingLabels) > 0: + return admission.NewForbidden(attr, fmt.Errorf("node selection by label(s) %v is prohibited by policy for your role", matchingLabels)) + case len(ps.NodeName) > 0 && len(matchingLabels) > 0: + return admission.NewForbidden(attr, fmt.Errorf("node selection by nodeName and label(s) %v is prohibited by policy for your role", matchingLabels)) + } + } + } + return nil +} + +func (o *podNodeConstraints) SetAuthorizer(a authorizer.Authorizer) { + o.authorizer = a +} + +func (o *podNodeConstraints) ValidateInitialization() error { + if o.authorizer == nil { + return fmt.Errorf("%s requires an authorizer", PluginName) + } + if o.nodeIdentifier == nil { + return fmt.Errorf("%s requires a node identifier", PluginName) + } + return nil +} + +// build LocalSubjectAccessReview struct to validate role via checkAccess +func (o *podNodeConstraints) checkPodsBindAccess(ctx context.Context, attr admission.Attributes) (bool, error) { + authzAttr := authorizer.AttributesRecord{ + User: attr.GetUserInfo(), + Verb: "create", + Namespace: attr.GetNamespace(), + Resource: "pods", + Subresource: "binding", + APIGroup: coreapi.GroupName, + ResourceRequest: true, + } + if attr.GetResource().GroupResource() == coreapi.Resource("pods") { + authzAttr.Name = attr.GetName() + } + authorized, _, err := o.authorizer.Authorize(ctx, authzAttr) + return authorized == authorizer.DecisionAllow, err +} + +func (o *podNodeConstraints) isNodeSelfTargetWithMirrorPod(attr admission.Attributes, nodeName string) bool { + // make sure we are actually trying to target a node + if len(nodeName) == 0 { + return false + } + // this check specifically requires the object to be pod (unlike the other checks where we want any pod spec) + pod, ok := attr.GetObject().(*coreapi.Pod) + if !ok { + return false + } + // note that anyone can create a mirror pod, but they are not privileged in any way + // they are actually highly constrained since they cannot reference secrets + // nodes can only create and delete them, and they will delete any "orphaned" mirror pods + if _, isMirrorPod := pod.Annotations[coreapi.MirrorPodAnnotationKey]; !isMirrorPod { + return false + } + // we are targeting a node with a mirror pod + // confirm the user is a node that is targeting itself + actualNodeName, isNode := o.nodeIdentifier.NodeIdentity(attr.GetUserInfo()) + return isNode && actualNodeName == nodeName +} diff --git a/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission_test.go b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission_test.go new file mode 100644 
index 0000000000000..a5587c5d0ee88 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission_test.go @@ -0,0 +1,283 @@ +package podnodeconstraints + +import ( + "bytes" + "context" + "fmt" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + kapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/auth/nodeidentifier" + + authorizationv1 "github.com/openshift/api/authorization/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints" +) + +func TestPodNodeConstraints(t *testing.T) { + ns := metav1.NamespaceDefault + tests := []struct { + config *podnodeconstraints.PodNodeConstraintsConfig + resource runtime.Object + kind schema.GroupKind + groupresource schema.GroupResource + userinfo user.Info + reviewResponse *authorizationv1.SubjectAccessReviewResponse + expectedResource string + expectedErrorMsg string + }{ + // 0: expect unspecified defaults to not error + { + config: emptyConfig(), + resource: defaultPod(), + userinfo: serviceaccount.UserInfo("", "", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "", + }, + // 1: expect nodeSelector to error with user which lacks "pods/binding" access + { + config: testConfig(), + resource: nodeSelectorPod(), + userinfo: serviceaccount.UserInfo("", "", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "node selection by label(s) [bogus] is prohibited by policy for your role", + }, + // 2: expect nodeName to fail with user that lacks "pods/binding" access + { + config: testConfig(), + resource: nodeNamePod(), + userinfo: serviceaccount.UserInfo("herpy", "derpy", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "node selection by nodeName is prohibited by policy for your role", + }, + // 3: expect nodeName and nodeSelector to fail with user that lacks "pods/binding" access + { + config: testConfig(), + resource: nodeNameNodeSelectorPod(), + userinfo: serviceaccount.UserInfo("herpy", "derpy", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "node selection by nodeName and label(s) [bogus] is prohibited by policy for your role", + }, + // 4: expect nodeSelector to succeed with user that has "pods/binding" access + { + config: testConfig(), + resource: nodeSelectorPod(), + userinfo: serviceaccount.UserInfo("openshift-infra", "daemonset-controller", ""), + reviewResponse: reviewResponse(true, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "", + }, + // 5: expect nodeName to succeed with user that has "pods/binding" access + { + config: testConfig(), + resource: nodeNamePod(), + userinfo: serviceaccount.UserInfo("openshift-infra", "daemonset-controller", ""), + reviewResponse: reviewResponse(true, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "", + }, + // 6: expect nil config to bypass admission + { + config: nil, + resource: defaultPod(), + userinfo: serviceaccount.UserInfo("", "", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "", + }, + // 7: 
expect nodeName to succeed with node user self targeting mirror pod + { + config: testConfig(), + resource: nodeNameMirrorPod(), + userinfo: &user.DefaultInfo{Name: "system:node:frank", Groups: []string{user.NodesGroup}}, + expectedErrorMsg: "", + }, + // 8: expect nodeName to fail with node user self targeting non-mirror pod + { + config: testConfig(), + resource: nodeNamePod(), + userinfo: &user.DefaultInfo{Name: "system:node:frank", Groups: []string{user.NodesGroup}}, + expectedErrorMsg: "node selection by nodeName is prohibited by policy for your role", + }, + // 9: expect nodeName to fail with node user non-self targeting mirror pod + { + config: testConfig(), + resource: nodeNameMirrorPod(), + userinfo: &user.DefaultInfo{Name: "system:node:bob", Groups: []string{user.NodesGroup}}, + expectedErrorMsg: "node selection by nodeName is prohibited by policy for your role", + }, + // 10: expect nodeName to fail with node user non-self targeting non-mirror pod + { + config: testConfig(), + resource: nodeNamePod(), + userinfo: &user.DefaultInfo{Name: "system:node:bob", Groups: []string{user.NodesGroup}}, + expectedErrorMsg: "node selection by nodeName is prohibited by policy for your role", + }, + } + for i, tc := range tests { + var expectedError error + errPrefix := fmt.Sprintf("%d", i) + prc := NewPodNodeConstraints(tc.config, nodeidentifier.NewDefaultNodeIdentifier()) + prc.(initializer.WantsAuthorizer).SetAuthorizer(fakeAuthorizer(t)) + err := prc.(admission.InitializationValidator).ValidateInitialization() + if err != nil { + checkAdmitError(t, err, expectedError, errPrefix) + continue + } + attrs := admission.NewAttributesRecord(tc.resource, nil, kapi.Kind("Pod").WithVersion("version"), ns, "test", kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, tc.userinfo) + if tc.expectedErrorMsg != "" { + expectedError = admission.NewForbidden(attrs, fmt.Errorf(tc.expectedErrorMsg)) + } + err = prc.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil) + checkAdmitError(t, err, expectedError, errPrefix) + } +} + +func TestPodNodeConstraintsPodUpdate(t *testing.T) { + ns := metav1.NamespaceDefault + var expectedError error + errPrefix := "PodUpdate" + prc := NewPodNodeConstraints(testConfig(), nodeidentifier.NewDefaultNodeIdentifier()) + prc.(initializer.WantsAuthorizer).SetAuthorizer(fakeAuthorizer(t)) + err := prc.(admission.InitializationValidator).ValidateInitialization() + if err != nil { + checkAdmitError(t, err, expectedError, errPrefix) + return + } + attrs := admission.NewAttributesRecord(nodeNamePod(), nodeNamePod(), kapi.Kind("Pod").WithVersion("version"), ns, "test", kapi.Resource("pods").WithVersion("version"), "", admission.Update, nil, false, serviceaccount.UserInfo("", "", "")) + err = prc.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil) + checkAdmitError(t, err, expectedError, errPrefix) +} + +func TestPodNodeConstraintsNonHandledResources(t *testing.T) { + ns := metav1.NamespaceDefault + errPrefix := "ResourceQuotaTest" + var expectedError error + prc := NewPodNodeConstraints(testConfig(), nodeidentifier.NewDefaultNodeIdentifier()) + prc.(initializer.WantsAuthorizer).SetAuthorizer(fakeAuthorizer(t)) + err := prc.(admission.InitializationValidator).ValidateInitialization() + if err != nil { + checkAdmitError(t, err, expectedError, errPrefix) + return + } + attrs := admission.NewAttributesRecord(resourceQuota(), nil, kapi.Kind("ResourceQuota").WithVersion("version"), ns, "test", 
kapi.Resource("resourcequotas").WithVersion("version"), "", admission.Create, nil, false, serviceaccount.UserInfo("", "", "")) + err = prc.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil) + checkAdmitError(t, err, expectedError, errPrefix) +} + +func emptyConfig() *podnodeconstraints.PodNodeConstraintsConfig { + return &podnodeconstraints.PodNodeConstraintsConfig{} +} + +func testConfig() *podnodeconstraints.PodNodeConstraintsConfig { + return &podnodeconstraints.PodNodeConstraintsConfig{ + NodeSelectorLabelBlacklist: []string{"bogus"}, + } +} + +func defaultPod() *kapi.Pod { + pod := &kapi.Pod{} + return pod +} + +func nodeNameNodeSelectorPod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.NodeName = "frank" + pod.Spec.NodeSelector = map[string]string{"bogus": "frank"} + return pod +} + +func nodeNamePod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.NodeName = "frank" + return pod +} + +func nodeNameMirrorPod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Annotations = map[string]string{kapi.MirrorPodAnnotationKey: "true"} + pod.Spec.NodeName = "frank" + return pod +} + +func nodeSelectorPod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.NodeSelector = map[string]string{"bogus": "frank"} + return pod +} + +func resourceQuota() runtime.Object { + rq := &kapi.ResourceQuota{} + return rq +} + +func checkAdmitError(t *testing.T, err error, expectedError error, prefix string) { + switch { + case expectedError == nil && err == nil: + // continue + case expectedError != nil && err != nil && err.Error() != expectedError.Error(): + t.Errorf("%s: expected error %q, got: %q", prefix, expectedError.Error(), err.Error()) + case expectedError == nil && err != nil: + t.Errorf("%s: expected no error, got: %q", prefix, err.Error()) + case expectedError != nil && err == nil: + t.Errorf("%s: expected error %q, no error received", prefix, expectedError.Error()) + } +} + +type fakeTestAuthorizer struct { + t *testing.T +} + +func fakeAuthorizer(t *testing.T) authorizer.Authorizer { + return &fakeTestAuthorizer{ + t: t, + } +} + +func (a *fakeTestAuthorizer) Authorize(_ context.Context, attributes authorizer.Attributes) (authorizer.Decision, string, error) { + ui := attributes.GetUser() + if ui == nil { + return authorizer.DecisionNoOpinion, "", fmt.Errorf("No valid UserInfo for Context") + } + // User with pods/bindings. permission: + if ui.GetName() == "system:serviceaccount:openshift-infra:daemonset-controller" { + return authorizer.DecisionAllow, "", nil + } + // User without pods/bindings. 
permission: + return authorizer.DecisionNoOpinion, "", nil +} + +func reviewResponse(allowed bool, msg string) *authorizationv1.SubjectAccessReviewResponse { + return &authorizationv1.SubjectAccessReviewResponse{ + Allowed: allowed, + Reason: msg, + } +} + +func TestReadConfig(t *testing.T) { + configStr := `apiVersion: scheduling.openshift.io/v1 +kind: PodNodeConstraintsConfig +nodeSelectorLabelBlacklist: + - bogus + - foo +` + buf := bytes.NewBufferString(configStr) + config, err := readConfig(buf) + if err != nil { + t.Fatalf("unexpected error reading config: %v", err) + } + if len(config.NodeSelectorLabelBlacklist) == 0 { + t.Fatalf("NodeSelectorLabelBlacklist didn't take specified value") + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/doc.go b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/doc.go new file mode 100644 index 0000000000000..dfdf50a8102f0 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/doc.go @@ -0,0 +1,44 @@ +/* +Package podnodeconstraints contains the PodNodeConstraints admission +control plugin. This plugin allows administrators to set policy +governing the use of the NodeName and NodeSelector attributes in pod +specs. + +Enabling this plugin will prevent the use of the NodeName field in Pod +templates for users and serviceaccounts which lack the "pods/binding" +permission, and which don't belong to groups which have the +"pods/binding" permission. + +This plugin will also prevent users, serviceaccounts and groups which +lack the "pods/binding" permission from specifying the NodeSelector field +in Pod templates for labels which appear in the +nodeSelectorLabelBlacklist list field. + +Configuration + +The plugin is configured via a PodNodeConstraintsConfig object in the +origin and kubernetes Master configs: + +admissionConfig: + pluginConfig: + PodNodeConstraints: + configuration: + apiVersion: v1 + kind: PodNodeConstraintsConfig + nodeSelectorLabelBlacklist: + - label1 + - label2 +... 
+kubernetesMasterConfig: + admissionConfig: + pluginConfig: + PodNodeConstraints: + configuration: + apiVersion: v1 + kind: PodNodeConstraintsConfig + nodeSelectorLabelBlacklist: + - label1 + - label2 +*/ + +package podnodeconstraints diff --git a/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission.go b/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission.go new file mode 100644 index 0000000000000..35e249acc3602 --- /dev/null +++ b/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission.go @@ -0,0 +1,281 @@ +package csiinlinevolumesecurity + +import ( + "context" + "fmt" + "io" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/warning" + "k8s.io/client-go/informers" + corev1listers "k8s.io/client-go/listers/core/v1" + storagev1listers "k8s.io/client-go/listers/storage/v1" + "k8s.io/klog/v2" + appsapi "k8s.io/kubernetes/pkg/apis/apps" + batchapi "k8s.io/kubernetes/pkg/apis/batch" + coreapi "k8s.io/kubernetes/pkg/apis/core" + podsecapi "k8s.io/pod-security-admission/api" +) + +const ( + // Plugin name + PluginName = "storage.openshift.io/CSIInlineVolumeSecurity" + // Label on the CSIDriver to declare the driver's effective pod security profile + csiInlineVolProfileLabel = "security.openshift.io/csi-ephemeral-volume-profile" + // Default values for the profile labels when no such label exists + defaultCSIInlineVolProfile = podsecapi.LevelPrivileged + defaultPodSecEnforceProfile = podsecapi.LevelRestricted + defaultPodSecWarnProfile = podsecapi.LevelRestricted + defaultPodSecAuditProfile = podsecapi.LevelRestricted + // Format string used for audit/warn/enforce response messages + admissionResponseFormatStr = "%s uses an inline volume provided by CSIDriver %s and namespace %s has a pod security %s level that is lower than %s" +) + +var ( + podSpecResources = map[schema.GroupResource]bool{ + coreapi.Resource("pods"): true, + coreapi.Resource("replicationcontrollers"): true, + coreapi.Resource("podtemplates"): true, + appsapi.Resource("replicasets"): true, + appsapi.Resource("deployments"): true, + appsapi.Resource("statefulsets"): true, + appsapi.Resource("daemonsets"): true, + batchapi.Resource("jobs"): true, + batchapi.Resource("cronjobs"): true, + } +) + +var _ = initializer.WantsExternalKubeInformerFactory(&csiInlineVolSec{}) +var _ = admission.ValidationInterface(&csiInlineVolSec{}) + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, + func(config io.Reader) (admission.Interface, error) { + return &csiInlineVolSec{ + Handler: admission.NewHandler(admission.Create), + }, nil + }) +} + +// csiInlineVolSec validates whether the namespace has permission to use a given +// CSI driver as an inline volume. 
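+// For example: a CSIDriver labeled security.openshift.io/csi-ephemeral-volume-profile=restricted +// may be used as an inline volume in any namespace, while a driver labeled privileged, or one +// carrying no label at all (the default is privileged), is only admitted in namespaces whose +// pod security enforce level is itself privileged.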
+type csiInlineVolSec struct { + *admission.Handler + defaultPolicy podsecapi.Policy + nsLister corev1listers.NamespaceLister + nsListerSynced func() bool + csiDriverLister storagev1listers.CSIDriverLister + csiDriverListSynced func() bool + podSpecExtractor PodSpecExtractor +} + +// SetExternalKubeInformerFactory registers namespace and CSIDriver informers and sets the default pod security policy +func (c *csiInlineVolSec) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + c.nsLister = kubeInformers.Core().V1().Namespaces().Lister() + c.nsListerSynced = kubeInformers.Core().V1().Namespaces().Informer().HasSynced + c.csiDriverLister = kubeInformers.Storage().V1().CSIDrivers().Lister() + c.csiDriverListSynced = kubeInformers.Storage().V1().CSIDrivers().Informer().HasSynced + c.podSpecExtractor = &OCPPodSpecExtractor{} + c.SetReadyFunc(func() bool { + return c.nsListerSynced() && c.csiDriverListSynced() + }) + + // set default pod security policy + c.defaultPolicy = podsecapi.Policy{ + Enforce: podsecapi.LevelVersion{ + Level: defaultPodSecEnforceProfile, + Version: podsecapi.GetAPIVersion(), + }, + Warn: podsecapi.LevelVersion{ + Level: defaultPodSecWarnProfile, + Version: podsecapi.GetAPIVersion(), + }, + Audit: podsecapi.LevelVersion{ + Level: defaultPodSecAuditProfile, + Version: podsecapi.GetAPIVersion(), + }, + } +} + +func (c *csiInlineVolSec) ValidateInitialization() error { + if c.nsLister == nil { + return fmt.Errorf("%s plugin needs a namespace lister", PluginName) + } + if c.nsListerSynced == nil { + return fmt.Errorf("%s plugin needs a namespace lister synced", PluginName) + } + if c.csiDriverLister == nil { + return fmt.Errorf("%s plugin needs a CSIDriver lister", PluginName) + } + if c.csiDriverListSynced == nil { + return fmt.Errorf("%s plugin needs a CSIDriver lister synced", PluginName) + } + if c.podSpecExtractor == nil { + return fmt.Errorf("%s plugin needs a pod spec extractor", PluginName) + } + return nil +} + +func (c *csiInlineVolSec) PolicyToEvaluate(labels map[string]string) (podsecapi.Policy, field.ErrorList) { + return podsecapi.PolicyToEvaluate(labels, c.defaultPolicy) +} + +func (c *csiInlineVolSec) Validate(ctx context.Context, attrs admission.Attributes, o admission.ObjectInterfaces) error { + // Only validate applicable resources + gr := attrs.GetResource().GroupResource() + if !podSpecResources[gr] { + return nil + } + // Do not validate subresources + if attrs.GetSubresource() != "" { + return nil + } + + // Get namespace + namespace, err := c.nsLister.Get(attrs.GetNamespace()) + if err != nil { + return admission.NewForbidden(attrs, fmt.Errorf("failed to get namespace: %v", err)) + } + // Require valid labels if they exist (the default policy is always valid) + nsPolicy, nsPolicyErrs := c.PolicyToEvaluate(namespace.Labels) + if len(nsPolicyErrs) > 0 { + return admission.NewForbidden(attrs, fmt.Errorf("invalid policy found on namespace %s: %v", namespace.Name, nsPolicyErrs)) + } + // If the namespace policy is fully privileged, no need to evaluate further + // because it is allowed to use any inline volumes. 
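+ // (privileged is the most permissive level, so a fully privileged namespace can + // never enforce a stricter level than a CSIDriver declares.)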
+ if nsPolicy.FullyPrivileged() { + return nil + } + + // Extract the pod spec to evaluate + obj := attrs.GetObject() + _, podSpec, err := c.podSpecExtractor.ExtractPodSpec(obj) + if err != nil { + return admission.NewForbidden(attrs, fmt.Errorf("failed to extract pod spec: %v", err)) + } + // If an object with an optional pod spec does not contain a pod spec, skip validation + if podSpec == nil { + return nil + } + + klogV := klog.V(5) + if klogV.Enabled() { + klogV.InfoS("CSIInlineVolumeSecurity evaluation", "policy", fmt.Sprintf("%v", nsPolicy), "op", attrs.GetOperation(), "resource", attrs.GetResource(), "namespace", attrs.GetNamespace(), "name", attrs.GetName()) + } + + // For each inline volume, find the CSIDriver and ensure the profile on the + // driver is allowed by the pod security profile on the namespace. + // If it is not: create errors, warnings, and audit as defined by policy. + for _, vol := range podSpec.Volumes { + // Only check for inline volumes + if vol.CSI == nil { + continue + } + + // Get the policy level for the CSIDriver + driverName := vol.CSI.Driver + driverLevel, err := c.getCSIDriverLevel(driverName) + if err != nil { + return admission.NewForbidden(attrs, err) + } + + // Compare CSIDriver level to the policy for the namespace + if podsecapi.CompareLevels(nsPolicy.Enforce.Level, driverLevel) > 0 { + // Not permitted, enforce error and deny admission + return admission.NewForbidden(attrs, fmt.Errorf(admissionResponseFormatStr, attrs.GetName(), driverName, attrs.GetNamespace(), "enforce", driverLevel)) + } + if podsecapi.CompareLevels(nsPolicy.Warn.Level, driverLevel) > 0 { + // Violates policy warn level, add warning + warning.AddWarning(ctx, "", fmt.Sprintf(admissionResponseFormatStr, attrs.GetName(), driverName, attrs.GetNamespace(), "warn", driverLevel)) + } + if podsecapi.CompareLevels(nsPolicy.Audit.Level, driverLevel) > 0 { + // Violates policy audit level, add audit annotation + auditMessageString := fmt.Sprintf(admissionResponseFormatStr, attrs.GetName(), driverName, attrs.GetNamespace(), "audit", driverLevel) + audit.AddAuditAnnotation(ctx, PluginName, auditMessageString) + } + } + + return nil +} + +// getCSIDriverLevel returns the effective policy level for the CSIDriver. +// If the driver is found and it has the label, use that policy. +// If the driver or the label is missing, default to the privileged policy. +func (c *csiInlineVolSec) getCSIDriverLevel(driverName string) (podsecapi.Level, error) { + driverLevel := defaultCSIInlineVolProfile + driver, err := c.csiDriverLister.Get(driverName) + if err != nil { + return driverLevel, nil + } + + csiDriverLabel, ok := driver.ObjectMeta.Labels[csiInlineVolProfileLabel] + if !ok { + return driverLevel, nil + } + + driverLevel, err = podsecapi.ParseLevel(csiDriverLabel) + if err != nil { + return driverLevel, fmt.Errorf("invalid label %s for CSIDriver %s: %v", csiInlineVolProfileLabel, driverName, err) + } + + return driverLevel, nil +} + +// PodSpecExtractor extracts a PodSpec from pod-controller resources that embed a PodSpec. +// This is the same as what is used in the pod-security-admission plugin (see +// staging/src/k8s.io/pod-security-admission/admission/admission.go) except here we +// are provided coreapi resources instead of corev1, which changes the interface. +type PodSpecExtractor interface { + // HasPodSpec returns true if the given resource type MAY contain an extractable PodSpec. 
+ HasPodSpec(schema.GroupResource) bool + // ExtractPodSpec returns a pod spec and metadata to evaluate from the object. + // An error returned here does not block admission of the pod-spec-containing object and is not returned to the user. + // If the object has no pod spec, return `nil, nil, nil`. + ExtractPodSpec(runtime.Object) (*metav1.ObjectMeta, *coreapi.PodSpec, error) +} + +type OCPPodSpecExtractor struct{} + +func (OCPPodSpecExtractor) HasPodSpec(gr schema.GroupResource) bool { + return podSpecResources[gr] +} + +func (OCPPodSpecExtractor) ExtractPodSpec(obj runtime.Object) (*metav1.ObjectMeta, *coreapi.PodSpec, error) { + switch o := obj.(type) { + case *coreapi.Pod: + return &o.ObjectMeta, &o.Spec, nil + case *coreapi.PodTemplate: + return extractPodSpecFromTemplate(&o.Template) + case *coreapi.ReplicationController: + return extractPodSpecFromTemplate(o.Spec.Template) + case *appsapi.ReplicaSet: + return extractPodSpecFromTemplate(&o.Spec.Template) + case *appsapi.Deployment: + return extractPodSpecFromTemplate(&o.Spec.Template) + case *appsapi.DaemonSet: + return extractPodSpecFromTemplate(&o.Spec.Template) + case *appsapi.StatefulSet: + return extractPodSpecFromTemplate(&o.Spec.Template) + case *batchapi.Job: + return extractPodSpecFromTemplate(&o.Spec.Template) + case *batchapi.CronJob: + return extractPodSpecFromTemplate(&o.Spec.JobTemplate.Spec.Template) + default: + return nil, nil, fmt.Errorf("unexpected object type: %s", obj.GetObjectKind().GroupVersionKind().String()) + } +} + +func extractPodSpecFromTemplate(template *coreapi.PodTemplateSpec) (*metav1.ObjectMeta, *coreapi.PodSpec, error) { + if template == nil { + return nil, nil, nil + } + return &template.ObjectMeta, &template.Spec, nil +} diff --git a/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission_test.go b/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission_test.go new file mode 100644 index 0000000000000..d69a03256a5b1 --- /dev/null +++ b/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission_test.go @@ -0,0 +1,508 @@ +package csiinlinevolumesecurity + +import ( + "context" + "fmt" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + corev1listers "k8s.io/client-go/listers/core/v1" + storagev1listers "k8s.io/client-go/listers/storage/v1" + "k8s.io/client-go/tools/cache" + appsapi "k8s.io/kubernetes/pkg/apis/apps" + batchapi "k8s.io/kubernetes/pkg/apis/batch" + coreapi "k8s.io/kubernetes/pkg/apis/core" + podsecapi "k8s.io/pod-security-admission/api" +) + +const ( + defaultNamespaceName = "test-namespace" + defaultCSIDriverName = "test-driver" + + // expected error string when privileged namespace is required + privNamespaceRequiredError = "has a pod security enforce level that is lower than privileged" +) + +func getMockCSIInlineVolSec(namespace *corev1.Namespace, driver *storagev1.CSIDriver) (*csiInlineVolSec, error) { + c := &csiInlineVolSec{ + Handler: admission.NewHandler(admission.Create), + defaultPolicy: podsecapi.Policy{ + Enforce: podsecapi.LevelVersion{ + Level: defaultPodSecEnforceProfile, + Version: podsecapi.GetAPIVersion(), + }, + Warn: podsecapi.LevelVersion{ + Level: defaultPodSecWarnProfile, + Version: podsecapi.GetAPIVersion(), + }, + Audit: podsecapi.LevelVersion{ + Level: 
defaultPodSecAuditProfile, + Version: podsecapi.GetAPIVersion(), + }, + }, + nsLister: fakeNamespaceLister(namespace), + nsListerSynced: func() bool { return true }, + csiDriverLister: fakeCSIDriverLister(driver), + csiDriverListSynced: func() bool { return true }, + podSpecExtractor: &OCPPodSpecExtractor{}, + } + if err := c.ValidateInitialization(); err != nil { + return nil, err + } + + return c, nil +} + +func fakeNamespaceLister(ns *corev1.Namespace) corev1listers.NamespaceLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + _ = indexer.Add(ns) + return corev1listers.NewNamespaceLister(indexer) +} + +func fakeCSIDriverLister(driver *storagev1.CSIDriver) storagev1listers.CSIDriverLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + _ = indexer.Add(driver) + return storagev1listers.NewCSIDriverLister(indexer) +} + +func TestValidate(t *testing.T) { + type TestStruct struct { + name string + obj runtime.Object + namespace *corev1.Namespace + driver *storagev1.CSIDriver + expectedError error + } + + tests := []TestStruct{ + { + name: "should allow pods with no volumes", + obj: testPod(), + namespace: testNamespaceNoLabels(), + driver: testCSIDriverNoLabels(), + }, + { + name: "should allow pods with inline volumes in a baseline namespace when the driver uses the baseline label", + obj: testPodWithInlineVol(), + namespace: testNamespaceBaseline(), + driver: testCSIDriverBaseline(), + }, + { + name: "should allow pods with inline volumes in a baseline namespace when the driver uses the restricted label", + obj: testPodWithInlineVol(), + namespace: testNamespaceBaseline(), + driver: testCSIDriverRestricted(), + }, + { + name: "should deny pod admission with inline volumes if the CSI driver is not found and namespace is restricted", + obj: testPodWithInvalidDriverName(), + namespace: testNamespaceRestricted(), + driver: testCSIDriverRestricted(), + expectedError: fmt.Errorf(privNamespaceRequiredError), + }, + { + name: "should allow pod admission with inline volumes if the CSI driver is not found and namespace is privileged", + obj: testPodWithInvalidDriverName(), + namespace: testNamespacePrivileged(), + driver: testCSIDriverRestricted(), + }, + { + name: "should deny pod admission if the CSI driver has an invalid profile label", + obj: testPodWithInlineVol(), + namespace: testNamespaceBaseline(), + driver: testCSIDriverInvalid(), + expectedError: fmt.Errorf("invalid label security.openshift.io/csi-ephemeral-volume-profile for CSIDriver test-driver: must be one of privileged, baseline, restricted"), + }, + { + name: "should deny pod admission if the namespace has an invalid profile label", + obj: testPodWithInlineVol(), + namespace: testNamespaceInvalid(), + driver: testCSIDriverRestricted(), + expectedError: fmt.Errorf("Invalid value: \"invalid-value\": must be one of privileged, baseline, restricted"), + }, + { + name: "should ignore types that do not have a pod spec", + obj: &coreapi.Service{}, + namespace: testNamespaceNoLabels(), + driver: testCSIDriverNoLabels(), + }, + } + + podSpecableObjects := []struct { + name string + obj runtime.Object + }{ + {"Pod", &coreapi.Pod{}}, + {"PodTemplate", &coreapi.PodTemplate{}}, + {"ReplicationController", &coreapi.ReplicationController{}}, + {"ReplicaSet", &appsapi.ReplicaSet{}}, + {"Deployment", &appsapi.Deployment{}}, + {"DaemonSet", &appsapi.DaemonSet{}}, + {"StatefulSet", &appsapi.StatefulSet{}}, + {"Job", &batchapi.Job{}}, + {"CronJob", &batchapi.CronJob{}}, + } + + // Add a 
standard subset of the tests for each supported object type + for _, pso := range podSpecableObjects { + objTests := []TestStruct{ + { + name: fmt.Sprintf("should deny %s admission by default when it has an inline volume and no policy is defined", pso.name), + obj: createPodControllerObject(pso.obj, testPodWithInlineVol()), + namespace: testNamespaceNoLabels(), + driver: testCSIDriverNoLabels(), + expectedError: fmt.Errorf(privNamespaceRequiredError), + }, + { + name: fmt.Sprintf("should deny %s admission with inline volumes in a baseline namespace when the driver uses the privileged label", pso.name), + obj: createPodControllerObject(pso.obj, testPodWithInlineVol()), + namespace: testNamespaceBaseline(), + driver: testCSIDriverPrivileged(), + expectedError: fmt.Errorf(privNamespaceRequiredError), + }, + { + name: fmt.Sprintf("should allow %s with only persistent volume claims", pso.name), + obj: createPodControllerObject(pso.obj, testPodWithPVC()), + namespace: testNamespaceNoLabels(), + driver: testCSIDriverNoLabels(), + }, + { + name: fmt.Sprintf("should allow %s with inline volumes when running in a privileged namespace", pso.name), + obj: createPodControllerObject(pso.obj, testPodWithInlineVol()), + namespace: testNamespacePrivileged(), + driver: testCSIDriverNoLabels(), + }, + { + name: fmt.Sprintf("should allow %s with inline volumes in a restricted namespace when the driver uses the restricted label", pso.name), + obj: createPodControllerObject(pso.obj, testPodWithInlineVol()), + namespace: testNamespaceRestricted(), + driver: testCSIDriverRestricted(), + }, + } + + tests = append(tests, objTests...) + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + c, err := getMockCSIInlineVolSec(test.namespace, test.driver) + if err != nil { + t.Fatalf("%s: failed getMockCSIInlineVolSec: %v", test.name, err) + } + + ns := test.namespace.Name + name := test.obj.(metav1.Object).GetName() + gvr := getObjectGroupVersionResource(test.obj) + attrs := admission.NewAttributesRecord(test.obj, nil, schema.GroupVersionKind{}, ns, name, gvr, "", admission.Create, nil, false, fakeUser()) + + err = c.Validate(context.TODO(), attrs, nil) + if err != nil { + if test.expectedError == nil { + t.Fatalf("%s: admission controller returned error: %v", test.name, err) + } + + if !strings.Contains(err.Error(), test.expectedError.Error()) { + t.Fatalf("%s: the expected error %v, got %v", test.name, test.expectedError, err) + } + } + + if err == nil && test.expectedError != nil { + t.Fatalf("%s: the expected error %v, got nil", test.name, test.expectedError) + } + }) + } +} + +func fakeUser() user.Info { + return &user.DefaultInfo{ + Name: "testuser", + } +} + +func testNamespaceNoLabels() *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: defaultNamespaceName, + }, + } +} + +func testNamespaceRestricted() *corev1.Namespace { + ns := testNamespaceNoLabels() + ns.Labels = map[string]string{ + "pod-security.kubernetes.io/audit": "restricted", + "pod-security.kubernetes.io/enforce": "restricted", + "pod-security.kubernetes.io/warn": "restricted", + } + return ns +} + +func testNamespaceBaseline() *corev1.Namespace { + ns := testNamespaceNoLabels() + ns.Labels = map[string]string{ + "pod-security.kubernetes.io/audit": "baseline", + "pod-security.kubernetes.io/enforce": "baseline", + "pod-security.kubernetes.io/warn": "baseline", + } + return ns +} + +func testNamespacePrivileged() *corev1.Namespace { + ns := testNamespaceNoLabels() + ns.Labels = 
map[string]string{ + "pod-security.kubernetes.io/audit": "privileged", + "pod-security.kubernetes.io/enforce": "privileged", + "pod-security.kubernetes.io/warn": "privileged", + } + return ns +} + +func testNamespaceInvalid() *corev1.Namespace { + ns := testNamespaceNoLabels() + ns.Labels = map[string]string{ + "pod-security.kubernetes.io/audit": "invalid-value", + "pod-security.kubernetes.io/enforce": "invalid-value", + "pod-security.kubernetes.io/warn": "invalid-value", + } + return ns +} + +func testCSIDriverNoLabels() *storagev1.CSIDriver { + return &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: defaultCSIDriverName, + }, + Spec: storagev1.CSIDriverSpec{ + VolumeLifecycleModes: []storagev1.VolumeLifecycleMode{ + storagev1.VolumeLifecycleEphemeral, + }, + }, + } +} + +func testCSIDriverRestricted() *storagev1.CSIDriver { + driver := testCSIDriverNoLabels() + driver.Labels = map[string]string{ + csiInlineVolProfileLabel: "restricted", + } + return driver +} + +func testCSIDriverBaseline() *storagev1.CSIDriver { + driver := testCSIDriverNoLabels() + driver.Labels = map[string]string{ + csiInlineVolProfileLabel: "baseline", + } + return driver +} + +func testCSIDriverPrivileged() *storagev1.CSIDriver { + driver := testCSIDriverNoLabels() + driver.Labels = map[string]string{ + csiInlineVolProfileLabel: "privileged", + } + return driver +} + +func testCSIDriverInvalid() *storagev1.CSIDriver { + driver := testCSIDriverNoLabels() + driver.Labels = map[string]string{ + csiInlineVolProfileLabel: "invalid-value", + } + return driver +} + +func testPod() *coreapi.Pod { + pod := &coreapi.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: defaultNamespaceName, + }, + Spec: coreapi.PodSpec{ + InitContainers: []coreapi.Container{ + { + Name: "initTest", + }, + }, + Containers: []coreapi.Container{ + { + Name: "test", + }, + }, + }, + } + + return pod +} + +func testPodWithInlineVol() *coreapi.Pod { + pod := testPod() + pod.Spec.Volumes = []coreapi.Volume{ + { + Name: "test-vol", + VolumeSource: coreapi.VolumeSource{ + CSI: &coreapi.CSIVolumeSource{ + Driver: defaultCSIDriverName, + }, + }, + }, + } + return pod +} + +func testPodWithPVC() *coreapi.Pod { + pod := testPod() + pod.Spec.Volumes = []coreapi.Volume{ + { + Name: "test-vol", + VolumeSource: coreapi.VolumeSource{ + PersistentVolumeClaim: &coreapi.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc", + }, + }, + }, + } + return pod +} + +func testPodWithInvalidDriverName() *coreapi.Pod { + pod := testPod() + pod.Spec.Volumes = []coreapi.Volume{ + { + Name: "test-vol", + VolumeSource: coreapi.VolumeSource{ + CSI: &coreapi.CSIVolumeSource{ + Driver: "invalid-csi-driver", + }, + }, + }, + } + return pod +} + +// Creates a pod controller object, given an object type and a pod for the template +func createPodControllerObject(obj runtime.Object, pod *coreapi.Pod) runtime.Object { + switch obj.(type) { + case *coreapi.Pod: + return pod + case *coreapi.PodTemplate: + return &coreapi.PodTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pod-template"}, + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + } + case *coreapi.ReplicationController: + return &coreapi.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{Name: "test-repl-controller"}, + Spec: coreapi.ReplicationControllerSpec{ + Template: &coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + } + case *appsapi.ReplicaSet: + return &appsapi.ReplicaSet{ + ObjectMeta: 
metav1.ObjectMeta{Name: "test-replicaset"}, + Spec: appsapi.ReplicaSetSpec{ + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + } + case *appsapi.Deployment: + return &appsapi.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment"}, + Spec: appsapi.DeploymentSpec{ + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + } + case *appsapi.DaemonSet: + return &appsapi.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test-daemonset"}, + Spec: appsapi.DaemonSetSpec{ + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + } + case *appsapi.StatefulSet: + return &appsapi.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test-statefulset"}, + Spec: appsapi.StatefulSetSpec{ + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + } + case *batchapi.Job: + return &batchapi.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test-job"}, + Spec: batchapi.JobSpec{ + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + } + case *batchapi.CronJob: + return &batchapi.CronJob{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cronjob"}, + Spec: batchapi.CronJobSpec{ + JobTemplate: batchapi.JobTemplateSpec{ + Spec: batchapi.JobSpec{ + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + }, + }, + } + default: + // If we can't add a pod template, just return the provided object. + return obj + } +} + +func getObjectGroupVersionResource(obj runtime.Object) schema.GroupVersionResource { + ver := "version" + switch obj.(type) { + case *coreapi.Pod: + return coreapi.Resource("pods").WithVersion(ver) + case *coreapi.PodTemplate: + return coreapi.Resource("podtemplates").WithVersion(ver) + case *coreapi.ReplicationController: + return coreapi.Resource("replicationcontrollers").WithVersion(ver) + case *appsapi.ReplicaSet: + return appsapi.Resource("replicasets").WithVersion(ver) + case *appsapi.Deployment: + return appsapi.Resource("deployments").WithVersion(ver) + case *appsapi.DaemonSet: + return appsapi.Resource("daemonsets").WithVersion(ver) + case *appsapi.StatefulSet: + return appsapi.Resource("statefulsets").WithVersion(ver) + case *batchapi.Job: + return batchapi.Resource("jobs").WithVersion(ver) + case *batchapi.CronJob: + return batchapi.Resource("cronjobs").WithVersion(ver) + default: + // If it's not a recognized object, return something invalid. + return coreapi.Resource("invalidresource").WithVersion("invalidversion") + } +} diff --git a/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/doc.go b/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/doc.go new file mode 100644 index 0000000000000..ad819a79135a8 --- /dev/null +++ b/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/doc.go @@ -0,0 +1,7 @@ +package csiinlinevolumesecurity + +// The CSIInlineVolumeSecurity admission plugin inspects inline volumes +// on pod creation and compares the security.openshift.io/csi-ephemeral-volume-profile +// label on the associated CSIDriver object to the pod security profile on the namespace. +// Admission is only allowed if the namespace enforces a profile of equal or greater +// permission compared to the profile label for the CSIDriver. 
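The comparison that drives these decisions can be sketched in isolation. A minimal example, assuming only the k8s.io/pod-security-admission/api package that the plugin itself imports; the namespace and driver levels chosen here are hypothetical:

package main

import (
	"fmt"

	podsecapi "k8s.io/pod-security-admission/api"
)

func main() {
	// Levels are ordered from most to least permissive:
	// privileged, then baseline, then restricted.
	nsEnforce := podsecapi.LevelBaseline     // hypothetical namespace enforce level
	driverLevel := podsecapi.LevelPrivileged // hypothetical CSIDriver profile label

	// CompareLevels returns a positive value when the first level is stricter,
	// i.e. the namespace grants less permission than the driver requires.
	if podsecapi.CompareLevels(nsEnforce, driverLevel) > 0 {
		fmt.Println("admission denied: namespace level is lower than the driver profile")
	}
}

With these inputs the check fires: a baseline namespace grants less permission than a privileged-profile driver requires, which is exactly the combination the tests above expect to be denied.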
diff --git a/openshift-kube-apiserver/authorization/browsersafe/authorizer.go b/openshift-kube-apiserver/authorization/browsersafe/authorizer.go new file mode 100644 index 0000000000000..2b39b309f69b8 --- /dev/null +++ b/openshift-kube-apiserver/authorization/browsersafe/authorizer.go @@ -0,0 +1,107 @@ +package browsersafe + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +const ( + proxyAction = "proxy" + unsafeProxy = "unsafeproxy" +) + +type browserSafeAuthorizer struct { + delegate authorizer.Authorizer + + // list of groups, any of which indicate the request is authenticated + authenticatedGroups sets.String +} + +func NewBrowserSafeAuthorizer(delegate authorizer.Authorizer, authenticatedGroups ...string) authorizer.Authorizer { + return &browserSafeAuthorizer{ + delegate: delegate, + authenticatedGroups: sets.NewString(authenticatedGroups...), + } +} + +func (a *browserSafeAuthorizer) Authorize(ctx context.Context, attributes authorizer.Attributes) (authorizer.Decision, string, error) { + attrs := a.getBrowserSafeAttributes(attributes) + decision, reason, err := a.delegate.Authorize(ctx, attrs) + safeAttributes, changed := attrs.(*browserSafeAttributes) + + // check if the request was not allowed and we changed the attributes + if decision == authorizer.DecisionAllow || !changed { + return decision, reason, err + } + + // if so, use this information to update the reason + return decision, safeAttributes.reason(reason), err +} + +func (a *browserSafeAuthorizer) getBrowserSafeAttributes(attributes authorizer.Attributes) authorizer.Attributes { + if !attributes.IsResourceRequest() { + return attributes + } + + isProxyVerb := attributes.GetVerb() == proxyAction + isProxySubresource := attributes.GetSubresource() == proxyAction + + if !isProxyVerb && !isProxySubresource { + // Requests to non-proxy resources don't expose HTML or HTTP-handling user content to browsers + return attributes + } + + if user := attributes.GetUser(); user != nil { + if a.authenticatedGroups.HasAny(user.GetGroups()...) { + // An authenticated request indicates this isn't a browser page load. + // Browsers cannot make direct authenticated requests. + // This depends on the API not enabling basic or cookie-based auth. 
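+ // (Basic or cookie auth would let a browser attach credentials implicitly, + // which would defeat this check; bearer-token auth cannot be triggered + // cross-site.)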
+ return attributes + } + } + + return &browserSafeAttributes{ + Attributes: attributes, + isProxyVerb: isProxyVerb, + isProxySubresource: isProxySubresource, + } +} + +type browserSafeAttributes struct { + authorizer.Attributes + + isProxyVerb, isProxySubresource bool +} + +func (b *browserSafeAttributes) GetVerb() string { + if b.isProxyVerb { + return unsafeProxy + } + return b.Attributes.GetVerb() +} + +func (b *browserSafeAttributes) GetSubresource() string { + if b.isProxySubresource { + return unsafeProxy + } + return b.Attributes.GetSubresource() +} + +func (b *browserSafeAttributes) reason(reason string) string { + if b.isProxyVerb { + if len(reason) != 0 { + reason += ", " + } + reason += fmt.Sprintf("%s verb changed to %s", proxyAction, unsafeProxy) + } + if b.isProxySubresource { + if len(reason) != 0 { + reason += ", " + } + reason += fmt.Sprintf("%s subresource changed to %s", proxyAction, unsafeProxy) + } + return reason +} diff --git a/openshift-kube-apiserver/authorization/browsersafe/authorizer_test.go b/openshift-kube-apiserver/authorization/browsersafe/authorizer_test.go new file mode 100644 index 0000000000000..1d14a86daddeb --- /dev/null +++ b/openshift-kube-apiserver/authorization/browsersafe/authorizer_test.go @@ -0,0 +1,80 @@ +package browsersafe + +import ( + "context" + "testing" + + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +func TestBrowserSafeAuthorizer(t *testing.T) { + for name, tc := range map[string]struct { + attributes authorizer.Attributes + + expectedVerb string + expectedSubresource string + expectedReason string + }{ + "non-resource": { + attributes: authorizer.AttributesRecord{ResourceRequest: false, Verb: "GET"}, + expectedVerb: "GET", + }, + + "non-proxy": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "get", Resource: "pods", Subresource: "logs"}, + expectedVerb: "get", + expectedSubresource: "logs", + }, + + "unsafe proxy subresource": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "get", Resource: "pods", Subresource: "proxy"}, + expectedVerb: "get", + expectedSubresource: "unsafeproxy", + expectedReason: "proxy subresource changed to unsafeproxy", + }, + "unsafe proxy verb": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "proxy", Resource: "nodes"}, + expectedVerb: "unsafeproxy", + expectedReason: "proxy verb changed to unsafeproxy", + }, + "unsafe proxy verb anonymous": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "proxy", Resource: "nodes", + User: &user.DefaultInfo{Name: "system:anonymous", Groups: []string{"system:unauthenticated"}}}, + expectedVerb: "unsafeproxy", + expectedReason: "proxy verb changed to unsafeproxy", + }, + + "proxy subresource authenticated": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "get", Resource: "pods", Subresource: "proxy", + User: &user.DefaultInfo{Name: "bob", Groups: []string{"system:authenticated"}}}, + expectedVerb: "get", + expectedSubresource: "proxy", + }, + } { + delegateAuthorizer := &recordingAuthorizer{} + safeAuthorizer := NewBrowserSafeAuthorizer(delegateAuthorizer, "system:authenticated") + + authorized, reason, err := safeAuthorizer.Authorize(context.TODO(), tc.attributes) + if authorized == authorizer.DecisionAllow || reason != tc.expectedReason || err != nil { + t.Errorf("%s: unexpected output: %v %s %v", name, authorized, reason, err) + continue + } + + if delegateAuthorizer.attributes.GetVerb() != 
tc.expectedVerb { + t.Errorf("%s: expected verb %s, got %s", name, tc.expectedVerb, delegateAuthorizer.attributes.GetVerb()) + } + if delegateAuthorizer.attributes.GetSubresource() != tc.expectedSubresource { + t.Errorf("%s: expected subresource %s, got %s", name, tc.expectedSubresource, delegateAuthorizer.attributes.GetSubresource()) + } + } +} + +type recordingAuthorizer struct { + attributes authorizer.Attributes +} + +func (t *recordingAuthorizer) Authorize(_ context.Context, a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) { + t.attributes = a + return authorizer.DecisionNoOpinion, "", nil +} diff --git a/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer.go b/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer.go new file mode 100644 index 0000000000000..989f70609528d --- /dev/null +++ b/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer.go @@ -0,0 +1,49 @@ +package scopeauthorizer + +import ( + "context" + "fmt" + + "k8s.io/apiserver/pkg/authorization/authorizer" + rbaclisters "k8s.io/client-go/listers/rbac/v1" + authorizerrbac "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac" + + authorizationv1 "github.com/openshift/api/authorization/v1" + "github.com/openshift/apiserver-library-go/pkg/authorization/scope" +) + +type scopeAuthorizer struct { + clusterRoleGetter rbaclisters.ClusterRoleLister +} + +func NewAuthorizer(clusterRoleGetter rbaclisters.ClusterRoleLister) authorizer.Authorizer { + return &scopeAuthorizer{clusterRoleGetter: clusterRoleGetter} +} + +func (a *scopeAuthorizer) Authorize(ctx context.Context, attributes authorizer.Attributes) (authorizer.Decision, string, error) { + user := attributes.GetUser() + if user == nil { + return authorizer.DecisionNoOpinion, "", fmt.Errorf("user missing from context") + } + + scopes := user.GetExtra()[authorizationv1.ScopesKey] + if len(scopes) == 0 { + return authorizer.DecisionNoOpinion, "", nil + } + + nonFatalErrors := "" + + // scopeResolutionErrors aren't fatal. If any of the scopes we find allow this, then the overall scope limits allow it + rules, err := scope.ScopesToRules(scopes, attributes.GetNamespace(), a.clusterRoleGetter) + if err != nil { + nonFatalErrors = fmt.Sprintf(", additionally the following non-fatal errors were reported: %v", err) + } + + // check rules against attributes + if authorizerrbac.RulesAllow(attributes, rules...) { + return authorizer.DecisionNoOpinion, "", nil + } + + // the scopes prevent this. 
We need to authoritatively deny + return authorizer.DecisionDeny, fmt.Sprintf("scopes %v prevent this action%s", scopes, nonFatalErrors), nil +} diff --git a/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer_test.go b/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer_test.go new file mode 100644 index 0000000000000..9b73e6c2e23ac --- /dev/null +++ b/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer_test.go @@ -0,0 +1,150 @@ +package scopeauthorizer + +import ( + "context" + "strings" + "testing" + + "k8s.io/apiserver/pkg/authentication/user" + kauthorizer "k8s.io/apiserver/pkg/authorization/authorizer" + + authorizationv1 "github.com/openshift/api/authorization/v1" +) + +func TestAuthorize(t *testing.T) { + testCases := []struct { + name string + attributes kauthorizer.AttributesRecord + expectedAllowed kauthorizer.Decision + expectedErr string + expectedMsg string + }{ + { + name: "no user", + attributes: kauthorizer.AttributesRecord{ + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionNoOpinion, + expectedErr: `user missing from context`, + }, + { + name: "no extra", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "empty extra", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{}}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "empty scopes", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {}}}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "bad scope", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"does-not-exist"}}}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionDeny, + expectedMsg: `scopes [does-not-exist] prevent this action, additionally the following non-fatal errors were reported: no scope evaluator found for "does-not-exist"`, + }, + { + name: "bad scope 2", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"role:dne"}}}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionDeny, + expectedMsg: `scopes [role:dne] prevent this action, additionally the following non-fatal errors were reported: bad format for scope role:dne`, + }, + { + name: "scope doesn't cover", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"user:info"}}}, + ResourceRequest: true, + Namespace: "ns", + Verb: "get", Resource: "users", Name: "harold"}, + expectedAllowed: kauthorizer.DecisionDeny, + expectedMsg: `scopes [user:info] prevent this action`, + }, + { + name: "scope covers", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"user:info"}}}, + ResourceRequest: true, + Namespace: "ns", + Verb: "get", Resource: "users", Name: "~"}, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "scope covers for discovery", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: 
{"user:info"}}}, + ResourceRequest: false, + Namespace: "ns", + Verb: "get", Path: "/api"}, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "user:full covers any resource", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"user:full"}}}, + ResourceRequest: true, + Namespace: "ns", + Verb: "update", Resource: "users", Name: "harold"}, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "user:full covers any non-resource", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"user:full"}}}, + ResourceRequest: false, + Namespace: "ns", + Verb: "post", Path: "/foo/bar/baz"}, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + authorizer := NewAuthorizer(nil) + + actualAllowed, actualMsg, actualErr := authorizer.Authorize(context.TODO(), tc.attributes) + switch { + case len(tc.expectedErr) == 0 && actualErr == nil: + case len(tc.expectedErr) == 0 && actualErr != nil: + t.Errorf("%s: unexpected error: %v", tc.name, actualErr) + case len(tc.expectedErr) != 0 && actualErr == nil: + t.Errorf("%s: missing error: %v", tc.name, tc.expectedErr) + case len(tc.expectedErr) != 0 && actualErr != nil: + if !strings.Contains(actualErr.Error(), tc.expectedErr) { + t.Errorf("expected %v, got %v", tc.expectedErr, actualErr) + } + } + if tc.expectedMsg != actualMsg { + t.Errorf("expected %v, got %v", tc.expectedMsg, actualMsg) + } + if tc.expectedAllowed != actualAllowed { + t.Errorf("expected %v, got %v", tc.expectedAllowed, actualAllowed) + } + }) + } +} diff --git a/openshift-kube-apiserver/configdefault/kubecontrolplane_default.go b/openshift-kube-apiserver/configdefault/kubecontrolplane_default.go new file mode 100644 index 0000000000000..7e48ecea2ec9a --- /dev/null +++ b/openshift-kube-apiserver/configdefault/kubecontrolplane_default.go @@ -0,0 +1,115 @@ +package configdefault + +import ( + "io/ioutil" + "os" + "path/filepath" + + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + "github.com/openshift/library-go/pkg/config/configdefaults" + "k8s.io/klog/v2" +) + +// ResolveDirectoriesForSATokenVerification takes our config (which allows directories) and navigates one level of +// those directories for files. This makes it easy to build a single configmap that contains lots of aggregated files. +// if we fail to open the file for inspection, the resolving code in kube-apiserver may have drifted from us +// we include the raw file and let the kube-apiserver succeed or fail. 
+func ResolveDirectoriesForSATokenVerification(config *kubecontrolplanev1.KubeAPIServerConfig) { + // kube doesn't honor directories, but we want to allow them in our sa token validators + resolvedSATokenValidationCerts := []string{} + for _, filename := range config.ServiceAccountPublicKeyFiles { + file, err := os.Open(filename) + if err != nil { + resolvedSATokenValidationCerts = append(resolvedSATokenValidationCerts, filename) + klog.Warning(err.Error()) + continue + } + fileInfo, err := file.Stat() + if err != nil { + resolvedSATokenValidationCerts = append(resolvedSATokenValidationCerts, filename) + klog.Warning(err.Error()) + continue + } + if !fileInfo.IsDir() { + resolvedSATokenValidationCerts = append(resolvedSATokenValidationCerts, filename) + continue + } + + contents, err := ioutil.ReadDir(filename) + switch { + case os.IsNotExist(err) || os.IsPermission(err): + klog.Warning(err.Error()) + case err != nil: + panic(err) // some weird, unexpected error + default: + for _, content := range contents { + if !content.Mode().IsRegular() { + continue + } + resolvedSATokenValidationCerts = append(resolvedSATokenValidationCerts, filepath.Join(filename, content.Name())) + } + } + } + + config.ServiceAccountPublicKeyFiles = resolvedSATokenValidationCerts +} + +func SetRecommendedKubeAPIServerConfigDefaults(config *kubecontrolplanev1.KubeAPIServerConfig) { + configdefaults.DefaultString(&config.GenericAPIServerConfig.StorageConfig.StoragePrefix, "kubernetes.io") + configdefaults.DefaultString(&config.GenericAPIServerConfig.ServingInfo.BindAddress, "0.0.0.0:6443") + + configdefaults.SetRecommendedGenericAPIServerConfigDefaults(&config.GenericAPIServerConfig) + SetRecommendedMasterAuthConfigDefaults(&config.AuthConfig) + SetRecommendedAggregatorConfigDefaults(&config.AggregatorConfig) + SetRecommendedKubeletConnectionInfoDefaults(&config.KubeletClientInfo) + + configdefaults.DefaultString(&config.ServicesSubnet, "10.0.0.0/24") + configdefaults.DefaultString(&config.ServicesNodePortRange, "30000-32767") + + if len(config.ServiceAccountPublicKeyFiles) == 0 { + config.ServiceAccountPublicKeyFiles = append([]string{}, "/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs") + } + + // after the aggregator defaults are set, we can default the auth config values + // TODO this indicates that we're setting two different things to the same value + if config.AuthConfig.RequestHeader == nil { + config.AuthConfig.RequestHeader = &kubecontrolplanev1.RequestHeaderAuthenticationOptions{} + configdefaults.DefaultStringSlice(&config.AuthConfig.RequestHeader.ClientCommonNames, []string{"system:openshift-aggregator"}) + configdefaults.DefaultString(&config.AuthConfig.RequestHeader.ClientCA, "/var/run/configmaps/aggregator-client-ca/ca-bundle.crt") + configdefaults.DefaultStringSlice(&config.AuthConfig.RequestHeader.UsernameHeaders, []string{"X-Remote-User"}) + configdefaults.DefaultStringSlice(&config.AuthConfig.RequestHeader.GroupHeaders, []string{"X-Remote-Group"}) + configdefaults.DefaultStringSlice(&config.AuthConfig.RequestHeader.ExtraHeaderPrefixes, []string{"X-Remote-Extra-"}) + } + + // Set default cache TTLs for external webhook token reviewers + for i := range config.AuthConfig.WebhookTokenAuthenticators { + if len(config.AuthConfig.WebhookTokenAuthenticators[i].CacheTTL) == 0 { + config.AuthConfig.WebhookTokenAuthenticators[i].CacheTTL = "2m" + } + } + + if config.OAuthConfig != nil { + for i := range config.OAuthConfig.IdentityProviders { + // By default, only let one identity
provider authenticate a particular user + // If multiple identity providers collide, the second one in will fail to auth + // The admin can set this to "add" if they want to allow new identities to join existing users + configdefaults.DefaultString(&config.OAuthConfig.IdentityProviders[i].MappingMethod, "claim") + } + } +} + +func SetRecommendedMasterAuthConfigDefaults(config *kubecontrolplanev1.MasterAuthConfig) { +} + +func SetRecommendedAggregatorConfigDefaults(config *kubecontrolplanev1.AggregatorConfig) { + configdefaults.DefaultString(&config.ProxyClientInfo.KeyFile, "/var/run/secrets/aggregator-client/tls.key") + configdefaults.DefaultString(&config.ProxyClientInfo.CertFile, "/var/run/secrets/aggregator-client/tls.crt") +} + +func SetRecommendedKubeletConnectionInfoDefaults(config *kubecontrolplanev1.KubeletConnectionInfo) { + if config.Port == 0 { + config.Port = 10250 + } + configdefaults.DefaultString(&config.CertInfo.KeyFile, "/var/run/secrets/kubelet-client/tls.key") + configdefaults.DefaultString(&config.CertInfo.CertFile, "/var/run/secrets/kubelet-client/tls.crt") +} diff --git a/openshift-kube-apiserver/configdefault/kubecontrolplane_refs.go b/openshift-kube-apiserver/configdefault/kubecontrolplane_refs.go new file mode 100644 index 0000000000000..449952e5650d1 --- /dev/null +++ b/openshift-kube-apiserver/configdefault/kubecontrolplane_refs.go @@ -0,0 +1,122 @@ +package configdefault + +import ( + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + osinv1 "github.com/openshift/api/osin/v1" + "github.com/openshift/library-go/pkg/config/helpers" +) + +func GetKubeAPIServerConfigFileReferences(config *kubecontrolplanev1.KubeAPIServerConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + + refs = append(refs, helpers.GetGenericAPIServerConfigFileReferences(&config.GenericAPIServerConfig)...) + refs = append(refs, GetKubeletConnectionInfoFileReferences(&config.KubeletClientInfo)...) + + if config.OAuthConfig != nil { + refs = append(refs, GetOAuthConfigFileReferences(config.OAuthConfig)...) + } + + refs = append(refs, &config.AggregatorConfig.ProxyClientInfo.CertFile) + refs = append(refs, &config.AggregatorConfig.ProxyClientInfo.KeyFile) + + if config.AuthConfig.RequestHeader != nil { + refs = append(refs, &config.AuthConfig.RequestHeader.ClientCA) + } + for k := range config.AuthConfig.WebhookTokenAuthenticators { + refs = append(refs, &config.AuthConfig.WebhookTokenAuthenticators[k].ConfigFile) + } + if len(config.AuthConfig.OAuthMetadataFile) > 0 { + refs = append(refs, &config.AuthConfig.OAuthMetadataFile) + } + + for i := range config.ServiceAccountPublicKeyFiles { + refs = append(refs, &config.ServiceAccountPublicKeyFiles[i]) + } + + return refs +} + +func GetKubeletConnectionInfoFileReferences(config *kubecontrolplanev1.KubeletConnectionInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, helpers.GetCertFileReferences(&config.CertInfo)...) + refs = append(refs, &config.CA) + return refs +} + +func GetOAuthConfigFileReferences(config *osinv1.OAuthConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + + if config.MasterCA != nil { + refs = append(refs, config.MasterCA) + } + + refs = append(refs, GetSessionConfigFileReferences(config.SessionConfig)...)
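+ // Collect pointers to each identity provider's file-backed fields (CA bundles, htpasswd files, bind passwords, + // client secrets) so the caller can resolve them relative to the config file's directory.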
+ for _, identityProvider := range config.IdentityProviders { + switch provider := identityProvider.Provider.Object.(type) { + case (*osinv1.RequestHeaderIdentityProvider): + refs = append(refs, &provider.ClientCA) + + case (*osinv1.HTPasswdPasswordIdentityProvider): + refs = append(refs, &provider.File) + + case (*osinv1.LDAPPasswordIdentityProvider): + refs = append(refs, &provider.CA) + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.BindPassword)...) + + case (*osinv1.BasicAuthPasswordIdentityProvider): + refs = append(refs, helpers.GetRemoteConnectionInfoFileReferences(&provider.RemoteConnectionInfo)...) + + case (*osinv1.KeystonePasswordIdentityProvider): + refs = append(refs, helpers.GetRemoteConnectionInfoFileReferences(&provider.RemoteConnectionInfo)...) + + case (*osinv1.GitLabIdentityProvider): + refs = append(refs, &provider.CA) + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.ClientSecret)...) + + case (*osinv1.OpenIDIdentityProvider): + refs = append(refs, &provider.CA) + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.ClientSecret)...) + + case (*osinv1.GoogleIdentityProvider): + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.ClientSecret)...) + + case (*osinv1.GitHubIdentityProvider): + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.ClientSecret)...) + refs = append(refs, &provider.CA) + + } + } + + if config.Templates != nil { + refs = append(refs, &config.Templates.Login) + refs = append(refs, &config.Templates.ProviderSelection) + refs = append(refs, &config.Templates.Error) + } + + return refs +} + +func GetSessionConfigFileReferences(config *osinv1.SessionConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, &config.SessionSecretsFile) + return refs +} diff --git a/openshift-kube-apiserver/enablement/enablement.go b/openshift-kube-apiserver/enablement/enablement.go new file mode 100644 index 0000000000000..d955f66825181 --- /dev/null +++ b/openshift-kube-apiserver/enablement/enablement.go @@ -0,0 +1,71 @@ +package enablement + +import ( + "fmt" + "runtime/debug" + + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/rest" +) + +func ForceOpenShift(newOpenshiftConfig *kubecontrolplanev1.KubeAPIServerConfig) { + isOpenShift = true + openshiftConfig = newOpenshiftConfig +} + +func SetLoopbackClientConfig(kubeClientConfig *rest.Config) { + loopbackClientConfig = rest.CopyConfig(kubeClientConfig) +} + +var ( + isOpenShift = false + openshiftConfig *kubecontrolplanev1.KubeAPIServerConfig + postStartHooks = map[string]PostStartHookConfigEntry{} + appendPostStartHooksCalled = false + loopbackClientConfig *rest.Config +) + +type PostStartHookConfigEntry struct { + Hook genericapiserver.PostStartHookFunc + // originatingStack holds the stack that registered postStartHooks. This allows us to show a more helpful message + // for duplicate registration. 
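+ // For example, a second AddPostStartHookOrDie call for an already-registered name (whatever name the caller + // chose) panics with this stack, pointing at the first registration.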
+ OriginatingStack string +} + +func IsOpenShift() bool { + return isOpenShift +} + +func OpenshiftConfig() *kubecontrolplanev1.KubeAPIServerConfig { + return openshiftConfig +} + +func LoopbackClientConfig() *rest.Config { + return loopbackClientConfig +} + +func AddPostStartHookOrDie(name string, hook genericapiserver.PostStartHookFunc) { + if appendPostStartHooksCalled { + panic(fmt.Errorf("already appended post start hooks")) + } + if len(name) == 0 { + panic(fmt.Errorf("missing name")) + } + if hook == nil { + panic(fmt.Errorf("hook func may not be nil: %q", name)) + } + + if postStartHook, exists := postStartHooks[name]; exists { + // this is programmer error, but it can be hard to debug + panic(fmt.Errorf("unable to add %q because it was already registered by: %s", name, postStartHook.OriginatingStack)) + } + postStartHooks[name] = PostStartHookConfigEntry{Hook: hook, OriginatingStack: string(debug.Stack())} +} + +func AppendPostStartHooksOrDie(config *genericapiserver.Config) { + appendPostStartHooksCalled = true + for name, curr := range postStartHooks { + config.AddPostStartHookOrDie(name, curr.Hook) + } +} diff --git a/openshift-kube-apiserver/enablement/intialization.go b/openshift-kube-apiserver/enablement/intialization.go new file mode 100644 index 0000000000000..52794bec4b645 --- /dev/null +++ b/openshift-kube-apiserver/enablement/intialization.go @@ -0,0 +1,92 @@ +package enablement + +import ( + "io/ioutil" + "path" + + "k8s.io/kubernetes/plugin/pkg/admission/security/podsecurity" + + configv1 "github.com/openshift/api/config/v1" + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + osinv1 "github.com/openshift/api/osin/v1" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission" + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/clientcmd/api" + aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" + "k8s.io/kubernetes/openshift-kube-apiserver/configdefault" + "k8s.io/kubernetes/pkg/capabilities" + "k8s.io/kubernetes/pkg/kubeapiserver/authorizer" + kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy" +) + +func GetOpenshiftConfig(openshiftConfigFile string) (*kubecontrolplanev1.KubeAPIServerConfig, error) { + // try to decode into our new types first. Right now there is no validation and no file path resolution; this unsticks the operator so it can start.
+ // TODO add those things + configContent, err := ioutil.ReadFile(openshiftConfigFile) + if err != nil { + return nil, err + } + scheme := runtime.NewScheme() + utilruntime.Must(kubecontrolplanev1.Install(scheme)) + codecs := serializer.NewCodecFactory(scheme) + obj, err := runtime.Decode(codecs.UniversalDecoder(kubecontrolplanev1.GroupVersion, configv1.GroupVersion, osinv1.GroupVersion), configContent) + if err != nil { + return nil, err + } + + // Resolve relative to CWD + absoluteConfigFile, err := api.MakeAbs(openshiftConfigFile, "") + if err != nil { + return nil, err + } + configFileLocation := path.Dir(absoluteConfigFile) + + config := obj.(*kubecontrolplanev1.KubeAPIServerConfig) + if err := helpers.ResolvePaths(configdefault.GetKubeAPIServerConfigFileReferences(config), configFileLocation); err != nil { + return nil, err + } + configdefault.SetRecommendedKubeAPIServerConfigDefaults(config) + configdefault.ResolveDirectoriesForSATokenVerification(config) + + return config, nil +} + +func ForceGlobalInitializationForOpenShift() { + // This allows moving crqs, sccs, and rbrs to CRDs + aggregatorapiserver.AddAlwaysLocalDelegateForPrefix("/apis/quota.openshift.io/v1/clusterresourcequotas") + aggregatorapiserver.AddAlwaysLocalDelegateForPrefix("/apis/security.openshift.io/v1/securitycontextconstraints") + aggregatorapiserver.AddAlwaysLocalDelegateForPrefix("/apis/authorization.openshift.io/v1/rolebindingrestrictions") + aggregatorapiserver.AddAlwaysLocalDelegateGroupResource(schema.GroupResource{Group: "authorization.openshift.io", Resource: "rolebindingrestrictions"}) + + // This allows the CRD registration to avoid fighting with the APIService from the operator + aggregatorapiserver.AddOverlappingGroupVersion(schema.GroupVersion{Group: "authorization.openshift.io", Version: "v1"}) + + // Allow privileged containers + capabilities.Initialize(capabilities.Capabilities{ + AllowPrivileged: true, + PrivilegedSources: capabilities.PrivilegedSources{ + HostNetworkSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, + HostPIDSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, + HostIPCSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, + }, + }) + + podsecurity.SCCMutatingPodSpecExtractorInstance.SetSCCAdmission(SCCAdmissionPlugin) + + // add permissions we require on our kube-apiserver + // TODO, we should scrub these out + bootstrappolicy.ClusterRoles = bootstrappolicy.OpenshiftClusterRoles + bootstrappolicy.ClusterRoleBindings = bootstrappolicy.OpenshiftClusterRoleBindings + + // we need to have the authorization chain place something before system:masters + // SkipSystemMastersAuthorizer disables the implicitly added system:masters authz and turns it into another authz mode, "SystemMasters", to be added via authorization-mode + authorizer.SkipSystemMastersAuthorizer() +} + +var SCCAdmissionPlugin = sccadmission.NewConstraint() diff --git a/openshift-kube-apiserver/filters/apirequestcount/apiaccess_count_controller.go b/openshift-kube-apiserver/filters/apirequestcount/apiaccess_count_controller.go new file mode 100644 index 0000000000000..ad2f82b737465 --- /dev/null +++ b/openshift-kube-apiserver/filters/apirequestcount/apiaccess_count_controller.go @@ -0,0 +1,217 @@ +package apirequestcount + +import ( + "context" + "fmt" + "math/rand" + "strings" + "sync" + "time" + + apiv1 "github.com/openshift/api/apiserver/v1" + apiv1client "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1" + metav1
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount" + "k8s.io/kubernetes/openshift-kube-apiserver/filters/apirequestcount/v1helpers" +) + +// NewController returns a controller +func NewController(client apiv1client.APIRequestCountInterface, nodeName string) *controller { + ret := &controller{ + client: client, + nodeName: nodeName, + updatePeriod: 5 * time.Minute, + } + ret.resetRequestCount() + return ret +} + +// APIRequestLogger support logging API request counts. +type APIRequestLogger interface { + IsDeprecated(resource, version, group string) bool + LogRequest(resource schema.GroupVersionResource, timestamp time.Time, user, userAgent, verb string) + Start(stop <-chan struct{}) +} + +type controller struct { + client apiv1client.APIRequestCountInterface + nodeName string + updatePeriod time.Duration + loadOnce sync.Once + + requestCountLock sync.RWMutex + requestCounts *apiRequestCounts +} + +// IsDeprecated return true if the resource is deprecated. +func (c *controller) IsDeprecated(resource, version, group string) bool { + _, ok := deprecatedApiRemovedRelease[schema.GroupVersionResource{ + Group: group, + Version: version, + Resource: resource, + }] + return ok +} + +// LogRequest queues an api request for logging +func (c *controller) LogRequest(resource schema.GroupVersionResource, timestamp time.Time, user, userAgent, verb string) { + c.requestCountLock.RLock() + defer c.requestCountLock.RUnlock() + // we snip user agents to reduce cardinality and unique keys. For well-behaved agents, we see useragents about like + // kube-controller-manager/v1.21.0 (linux/amd64) kubernetes/743bd58/kube-controller-manager + // so we will snip at the first space. 
+ snippedUserAgent := userAgent + if i := strings.Index(userAgent, " "); i > 0 { + snippedUserAgent = userAgent[:i] + } + userKey := userKey{ + user: user, + userAgent: snippedUserAgent, + } + c.requestCounts.IncrementRequestCount(resource, timestamp.Hour(), userKey, verb, 1) +} + +// resetRequestCount returns the current counts and replaces them with a fresh apiRequestCounts instance. +func (c *controller) resetRequestCount() *apiRequestCounts { + c.requestCountLock.Lock() + defer c.requestCountLock.Unlock() + existing := c.requestCounts + c.requestCounts = newAPIRequestCounts(c.nodeName) + return existing +} + +// Start the controller +func (c *controller) Start(stop <-chan struct{}) { + klog.Infof("Starting APIRequestCount controller.") + + // create a context.Context needed for some API calls + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-stop + klog.Infof("Shutting down APIRequestCount controller.") + cancel() + }() + + // write out logs every c.updatePeriod + go wait.NonSlidingUntilWithContext(ctx, c.sync, c.updatePeriod) +} + +func (c *controller) sync(ctx context.Context) { + currentHour := time.Now().Hour() + c.persistRequestCountForAllResources(ctx, currentHour) +} + +func (c *controller) persistRequestCountForAllResources(ctx context.Context, currentHour int) { + klog.V(4).Infof("updating top APIRequest counts") + defer klog.V(4).Infof("finished updating top APIRequest counts") + + // get the current count to persist, start a new in-memory count + countsToPersist := c.resetRequestCount() + + // remove stale data + expiredHour := (currentHour + 1) % 24 + countsToPersist.ExpireOldestCounts(expiredHour) + + // when this function returns, add any remaining counts back to the total to be retried for update + defer c.requestCounts.Add(countsToPersist) + + // Add resources that have an existing APIRequestCount so that the current and hourly logs + // continue to rotate even if the resource has not had a request since the last restart. + c.loadOnce.Do(func() { + // As resources are never fully removed from countsToPersist, we only need to do this once. + // After the request counts have been persisted, the resources will be added "back" to the + // in-memory counts (c.requestCounts, see defer statement above).
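+ // List the existing APIRequestCounts and seed countsToPersist with their resources; names that do not + // parse as resource.version.group (objects created by hand) are reported and skipped below.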
+ arcs, err := c.client.List(ctx, metav1.ListOptions{}) + if err != nil { + runtime.HandleError(err) // oh well, we tried + return + } + for _, arc := range arcs.Items { + gvr, err := apirequestcount.NameToResource(arc.Name) + if err != nil { + runtime.HandleError(fmt.Errorf("invalid APIRequestCount %s (added manually) should be deleted: %v", arc.Name, err)) + continue + } + countsToPersist.Resource(gvr) + } + }) + + var wg sync.WaitGroup + for gvr := range countsToPersist.resourceToRequestCount { + resourceCount := countsToPersist.Resource(gvr) + wg.Add(1) + go func() { + time.Sleep(time.Duration(rand.Int63n(int64(c.updatePeriod / 5 * 4)))) // smear out over the interval to avoid resource spikes + c.persistRequestCountForResource(ctx, &wg, currentHour, expiredHour, resourceCount) + }() + } + wg.Wait() +} + +func (c *controller) persistRequestCountForResource(ctx context.Context, wg *sync.WaitGroup, currentHour, expiredHour int, localResourceCount *resourceRequestCounts) { + defer wg.Done() + + klog.V(4).Infof("updating top %v APIRequest counts", localResourceCount.resource) + defer klog.V(4).Infof("finished updating top %v APIRequest counts", localResourceCount.resource) + + status, _, err := v1helpers.ApplyStatus( + ctx, + c.client, + resourceToAPIName(localResourceCount.resource), + nodeStatusDefaulter(c.nodeName, currentHour, expiredHour, localResourceCount.resource), + SetRequestCountsForNode(c.nodeName, currentHour, expiredHour, localResourceCount), + ) + if err != nil { + runtime.HandleError(err) + return + } + + // on successful update, remove the counts we don't need. This is every hour except the current hour + // and every user recorded for the current hour on this node + removePersistedRequestCounts(c.nodeName, currentHour, status, localResourceCount) +} + +// removePersistedRequestCounts removes the counts we don't need to keep in memory. +// This is every hour except the current hour (those will no longer change) and every user recorded for the current hour on this node. +// Then it tracks the amount that needs to be kept out of the sum. This is logically the amount we're adding back in. +// Because we already counted all the users in the persisted sum, we need to exclude the amount we'll be placing back +// in memory. +func removePersistedRequestCounts(nodeName string, currentHour int, persistedStatus *apiv1.APIRequestCountStatus, localResourceCount *resourceRequestCounts) { + for hourIndex := range localResourceCount.hourToRequestCount { + if currentHour != hourIndex { + localResourceCount.RemoveHour(hourIndex) + } + } + for _, persistedNodeCount := range persistedStatus.CurrentHour.ByNode { + if persistedNodeCount.NodeName != nodeName { + continue + } + for _, persistedUserCount := range persistedNodeCount.ByUser { + userKey := userKey{ + user: persistedUserCount.UserName, + userAgent: persistedUserCount.UserAgent, + } + localResourceCount.Hour(currentHour).RemoveUser(userKey) + } + } + + countToSuppress := int64(0) + for _, userCounts := range localResourceCount.Hour(currentHour).usersToRequestCounts { + for _, verbCount := range userCounts.verbsToRequestCounts { + countToSuppress += verbCount.count + } + } + + localResourceCount.Hour(currentHour).countToSuppress = countToSuppress +} + +func resourceToAPIName(resource schema.GroupVersionResource) string { + apiName := resource.Resource + "." + resource.Version + if len(resource.Group) > 0 { + apiName += "." 
+ resource.Group + } + return apiName +} diff --git a/openshift-kube-apiserver/filters/apirequestcount/apiaccess_count_controller_test.go b/openshift-kube-apiserver/filters/apirequestcount/apiaccess_count_controller_test.go new file mode 100644 index 0000000000000..19631c17524cd --- /dev/null +++ b/openshift-kube-apiserver/filters/apirequestcount/apiaccess_count_controller_test.go @@ -0,0 +1,1242 @@ +package apirequestcount + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + apiv1 "github.com/openshift/api/apiserver/v1" + "github.com/openshift/client-go/apiserver/clientset/versioned/fake" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount" +) + +func TestRemovedRelease(t *testing.T) { + rr := removedRelease( + schema.GroupVersionResource{ + Group: "flowcontrol.apiserver.k8s.io", + Version: "v1alpha1", + Resource: "flowschemas", + }) + assert.Equal(t, "1.21", rr) +} + +func TestLoggingResetRace(t *testing.T) { + c := &controller{} + c.resetRequestCount() + + start := make(chan struct{}) + for i := 0; i < 20; i++ { + go func() { + <-start + for i := 0; i < 100; i++ { + c.LogRequest(schema.GroupVersionResource{Resource: "pods"}, time.Now(), "user", "some-agent", "verb") + } + }() + } + + for i := 0; i < 10; i++ { + go func() { + <-start + for i := 0; i < 100; i++ { + c.resetRequestCount() + } + }() + } + + close(start) + + // hope for no data race, which of course failed +} + +func TestAPIStatusToRequestCount(t *testing.T) { + testCases := []struct { + name string + resource schema.GroupVersionResource + status *apiv1.APIRequestCountStatus + expected *clusterRequestCounts + }{ + { + name: "Empty", + resource: gvr("test.v1.group"), + status: &apiv1.APIRequestCountStatus{}, + expected: cluster(), + }, + { + name: "NotEmpty", + resource: gvr("test.v1.group"), + status: &apiv1.APIRequestCountStatus{ + Last24h: []apiv1.PerResourceAPIRequestLog{ + {}, + {}, + {}, + {ByNode: []apiv1.PerNodeAPIRequestLog{ + {NodeName: "node1", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "eva", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "get", RequestCount: 625}, {Verb: "watch", RequestCount: 540}, + }}, + }}, + {NodeName: "node3", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "mia", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "list", RequestCount: 1427}, {Verb: "create", RequestCount: 1592}, {Verb: "watch", RequestCount: 1143}, + }}, + {UserName: "ava", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "update", RequestCount: 40}, {Verb: "patch", RequestCount: 1047}, + }}, + }}, + {NodeName: "node5", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "mia", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "delete", RequestCount: 360}, {Verb: "deletecollection", RequestCount: 1810}, {Verb: "update", RequestCount: 149}, + }}, + {UserName: "zoe", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "get", RequestCount: 1714}, {Verb: "watch", RequestCount: 606}, {Verb: "list", RequestCount: 703}, + }}, + }}, + {NodeName: "node2", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "mia", UserAgent: "some-agent", 
ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "get", RequestCount: 305}, + }}, + {UserName: "ivy", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "create", RequestCount: 1113}, + }}, + {UserName: "zoe", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "patch", RequestCount: 1217}, {Verb: "delete", RequestCount: 1386}, + }}, + }}, + }}, + {ByNode: []apiv1.PerNodeAPIRequestLog{ + {NodeName: "node1", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "mia", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "delete", RequestCount: 1386}, + }}, + }}, + {NodeName: "node5", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "ava", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "create", RequestCount: 1091}, + }}, + }}, + }}, + {}, + {}, + {}, + {ByNode: []apiv1.PerNodeAPIRequestLog{ + {NodeName: "node3", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "eva", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "list", RequestCount: 20}, + }}, + }}, + }}, + }, + }, + expected: cluster( + withNode("node1", + withResource("test.v1.group", + withHour(3, + withUser("eva", "some-agent", withCounts("get", 625), withCounts("watch", 540)), + ), + withHour(4, + withUser("mia", "some-agent", withCounts("delete", 1386)), + ), + ), + ), + withNode("node3", + withResource("test.v1.group", + withHour(3, + withUser("mia", "some-agent", + withCounts("list", 1427), + withCounts("create", 1592), + withCounts("watch", 1143), + ), + withUser("ava", "some-agent", + withCounts("update", 40), + withCounts("patch", 1047), + ), + ), + withHour(8, + withUser("eva", "some-agent", withCounts("list", 20)), + ), + ), + ), + withNode("node5", + withResource("test.v1.group", + withHour(3, + withUser("mia", "some-agent", + withCounts("delete", 360), + withCounts("deletecollection", 1810), + withCounts("update", 149), + ), + withUser("zoe", "some-agent", + withCounts("get", 1714), + withCounts("watch", 606), + withCounts("list", 703), + ), + ), + withHour(4, + withUser("ava", "some-agent", withCounts("create", 1091)), + ), + ), + ), + withNode("node2", + withResource("test.v1.group", + withHour(3, + withUser("mia", "some-agent", + withCounts("get", 305), + ), + withUser("ivy", "some-agent", + withCounts("create", 1113), + ), + withUser("zoe", "some-agent", + withCounts("patch", 1217), + withCounts("delete", 1386), + ), + ), + ), + ), + ), + }, + { + name: "SplitUserAgent", + resource: gvr("test.v1.group"), + status: &apiv1.APIRequestCountStatus{ + Last24h: []apiv1.PerResourceAPIRequestLog{ + {}, + {}, + {}, + {ByNode: []apiv1.PerNodeAPIRequestLog{ + {NodeName: "node1", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "eva", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "get", RequestCount: 625}, {Verb: "watch", RequestCount: 540}, + }}, + }}, + {NodeName: "node3", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "mia", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "list", RequestCount: 1427}, {Verb: "create", RequestCount: 1592}, {Verb: "watch", RequestCount: 1143}, + }}, + {UserName: "mia", UserAgent: "DIFFERENT-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "delete", RequestCount: 531}, + }}, + {UserName: "ava", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "update", RequestCount: 40}, {Verb: "patch", RequestCount: 1047}, + }}, + }}, + {NodeName: "node5", ByUser: []apiv1.PerUserAPIRequestCount{ 
+ {UserName: "mia", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "delete", RequestCount: 360}, {Verb: "deletecollection", RequestCount: 1810}, {Verb: "update", RequestCount: 149}, + }}, + {UserName: "zoe", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "get", RequestCount: 1714}, {Verb: "watch", RequestCount: 606}, {Verb: "list", RequestCount: 703}, + }}, + }}, + {NodeName: "node2", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "mia", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "get", RequestCount: 305}, + }}, + {UserName: "ivy", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "create", RequestCount: 1113}, + }}, + {UserName: "zoe", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "patch", RequestCount: 1217}, {Verb: "delete", RequestCount: 1386}, + }}, + }}, + }}, + {ByNode: []apiv1.PerNodeAPIRequestLog{ + {NodeName: "node1", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "mia", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "delete", RequestCount: 1386}, + }}, + }}, + {NodeName: "node5", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "ava", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "create", RequestCount: 1091}, + }}, + }}, + }}, + {}, + {}, + {}, + {ByNode: []apiv1.PerNodeAPIRequestLog{ + {NodeName: "node3", ByUser: []apiv1.PerUserAPIRequestCount{ + {UserName: "eva", UserAgent: "some-agent", ByVerb: []apiv1.PerVerbAPIRequestCount{ + {Verb: "list", RequestCount: 20}, + }}, + }}, + }}, + }, + }, + expected: cluster( + withNode("node1", + withResource("test.v1.group", + withHour(3, + withUser("eva", "some-agent", withCounts("get", 625), withCounts("watch", 540)), + ), + withHour(4, + withUser("mia", "some-agent", withCounts("delete", 1386)), + ), + ), + ), + withNode("node3", + withResource("test.v1.group", + withHour(3, + withUser("mia", "some-agent", + withCounts("list", 1427), + withCounts("create", 1592), + withCounts("watch", 1143), + ), + withUser("mia", "DIFFERENT-agent", + withCounts("delete", 531), + ), + withUser("ava", "some-agent", + withCounts("update", 40), + withCounts("patch", 1047), + ), + ), + withHour(8, + withUser("eva", "some-agent", withCounts("list", 20)), + ), + ), + ), + withNode("node5", + withResource("test.v1.group", + withHour(3, + withUser("mia", "some-agent", + withCounts("delete", 360), + withCounts("deletecollection", 1810), + withCounts("update", 149), + ), + withUser("zoe", "some-agent", + withCounts("get", 1714), + withCounts("watch", 606), + withCounts("list", 703), + ), + ), + withHour(4, + withUser("ava", "some-agent", withCounts("create", 1091)), + ), + ), + ), + withNode("node2", + withResource("test.v1.group", + withHour(3, + withUser("mia", "some-agent", + withCounts("get", 305), + ), + withUser("ivy", "some-agent", + withCounts("create", 1113), + ), + withUser("zoe", "some-agent", + withCounts("patch", 1217), + withCounts("delete", 1386), + ), + ), + ), + ), + ), + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + actual := apiStatusToRequestCount(tc.resource, tc.status) + assert.Equal(t, actual, tc.expected) + }) + } +} + +func TestSetRequestCountsForNode(t *testing.T) { + testCases := []struct { + name string + nodeName string + expiredHour int + countsToPersist *resourceRequestCounts + status *apiv1.APIRequestCountStatus + expected *apiv1.APIRequestCountStatus + }{ + { + name: "Empty", + nodeName: 
"node1", + expiredHour: 5, + countsToPersist: resource("test.v1.group"), + status: &apiv1.APIRequestCountStatus{}, + expected: apiRequestCountStatus( + withRequestLastHour(withPerNodeAPIRequestLog("node1")), + withRequestLast24hN("0-4,6-23", withPerNodeAPIRequestLog("node1")), + withComputedRequestCountTotals(), + ), + }, + { + name: "EmptyStatus", + nodeName: "node1", + expiredHour: 5, + countsToPersist: resource("test.v1.group", + withHour(3, + withUser("eva", "some-agent", withCounts("get", 625), withCounts("watch", 540)), + ), + withHour(4, + withUser("mia", "some-agent", withCounts("delete", 1386)), + ), + ), + status: &apiv1.APIRequestCountStatus{}, + expected: apiRequestCountStatus( + withRequestLastHour( + withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + ), + ), + withRequestLast24hN("0-4,6-23", withPerNodeAPIRequestLog("node1")), + withRequestLast24h(3, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("eva", "some-agent", withRequestCount("get", 625), withRequestCount("watch", 540)), + )), + withRequestLast24h(4, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + )), + withComputedRequestCountTotals(), + ), + }, + { + name: "UpdateAndExpire", + nodeName: "node1", + expiredHour: 3, + countsToPersist: resource("test.v1.group", + withHour(3, + withUser("eva", "some-agent", withCounts("get", 625), withCounts("watch", 540)), + ), + withHour(4, + withUser("mia", "some-agent", withCounts("delete", 1386)), + ), + withHour(5, + withUser("mia", "some-agent", withCounts("list", 434)), + ), + ), + status: apiRequestCountStatus( + withRequestLastHour(withPerNodeAPIRequestLog("node1")), + withRequestLast24hN("0-4,6-23", withPerNodeAPIRequestLog("node1")), + withRequestLast24h(3, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("eva", "some-agent", withRequestCount("get", 625), withRequestCount("watch", 540)), + )), + withRequestLast24h(4, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + )), + withComputedRequestCountTotals(), + ), + expected: apiRequestCountStatus( + withRequestLastHour(withPerNodeAPIRequestLog("node1")), + withRequestLast24hN("0-2,4-23", withPerNodeAPIRequestLog("node1")), + withRequestLast24h(4, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 2772)), + )), + withRequestLast24h(5, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("list", 434)), + )), + withComputedRequestCountTotals(), + ), + }, + { + name: "OtherNode", + nodeName: "node2", + expiredHour: 5, + countsToPersist: resource("test.v1.group", + withHour(3, + withUser("mia", "some-agent", withCounts("get", 305)), + withUser("ivy", "some-agent", withCounts("create", 1113)), + withUser("zoe", "some-agent", withCounts("patch", 1217), withCounts("delete", 1386)), + ), + ), + status: apiRequestCountStatus( + withRequestLastHour(withPerNodeAPIRequestLog("node1")), + withRequestLast24hN("0-4,6-23", withPerNodeAPIRequestLog("node1")), + withRequestLast24h(3, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("eva", "some-agent", withRequestCount("get", 625), withRequestCount("watch", 540)), + )), + withRequestLast24h(4, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + )), + 
withComputedRequestCountTotals(), + ), + expected: apiRequestCountStatus( + withRequestLastHour( + withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + ), + withPerNodeAPIRequestLog("node2"), + ), + withRequestLast24hN("0-4,6-23", withPerNodeAPIRequestLog("node1"), withPerNodeAPIRequestLog("node2")), + withRequestLast24h(3, + withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("eva", "some-agent", withRequestCount("get", 625), withRequestCount("watch", 540)), + ), + withPerNodeAPIRequestLog("node2", + withPerUserAPIRequestCount("zoe", "some-agent", withRequestCount("delete", 1386), withRequestCount("patch", 1217)), + withPerUserAPIRequestCount("ivy", "some-agent", withRequestCount("create", 1113)), + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("get", 305)), + ), + ), + withRequestLast24h(4, + withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + ), + withPerNodeAPIRequestLog("node2"), + ), + withComputedRequestCountTotals(), + ), + }, + { + name: "PreviousCountSuppression", + nodeName: "node2", + expiredHour: 5, + countsToPersist: resource("test.v1.group", + withHour(3, + withCountToSuppress(10), + withUser("mia", "some-agent", withCounts("get", 305)), + withUser("ivy", "some-agent", withCounts("create", 1113)), + withUser("zoe", "some-agent", withCounts("patch", 1217), withCounts("delete", 1386)), + ), + ), + status: apiRequestCountStatus( + withRequestLastHour(withPerNodeAPIRequestLog("node1")), + withRequestLast24hN("0-4,6-23", withPerNodeAPIRequestLog("node1")), + withRequestLast24h(3, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("eva", "some-agent", withRequestCount("get", 625), withRequestCount("watch", 540)), + )), + withRequestLast24h(4, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + )), + withComputedRequestCountTotals(), + ), + expected: apiRequestCountStatus( + withRequestLastHour( + withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + ), + withPerNodeAPIRequestLog("node2"), + ), + withRequestLast24hN("0-4,6-23", withPerNodeAPIRequestLog("node1"), withPerNodeAPIRequestLog("node2")), + withRequestLast24h(3, + withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("eva", "some-agent", withRequestCount("get", 625), withRequestCount("watch", 540)), + ), + withPerNodeAPIRequestLog("node2", + withPerNodeRequestCount(4011), + withPerUserAPIRequestCount("zoe", "some-agent", withRequestCount("delete", 1386), withRequestCount("patch", 1217)), + withPerUserAPIRequestCount("ivy", "some-agent", withRequestCount("create", 1113)), + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("get", 305)), + ), + ), + withRequestLast24h(4, + withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + ), + withPerNodeAPIRequestLog("node2"), + ), + withComputedRequestCountTotals(), + ), + }, + { + name: "UniqueAgents", + nodeName: "node1", + expiredHour: 5, + countsToPersist: resource("test.v1.group", + withHour(3, + withUser("eva", "some-agent", withCounts("get", 625), withCounts("watch", 540)), + ), + withHour(4, + withUser("mia", "some-agent", withCounts("delete", 1386)), + withUser("mia", "DIFFERENT-agent", withCounts("delete", 542)), + ), + ), + status: &apiv1.APIRequestCountStatus{}, + expected: 
apiRequestCountStatus( + withRequestLastHour( + withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + withPerUserAPIRequestCount("mia", "DIFFERENT-agent", withRequestCount("delete", 542)), + ), + ), + withRequestLast24hN("0-4,6-23", withPerNodeAPIRequestLog("node1")), + withRequestLast24h(3, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("eva", "some-agent", withRequestCount("get", 625), withRequestCount("watch", 540)), + )), + withRequestLast24h(4, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + withPerUserAPIRequestCount("mia", "DIFFERENT-agent", withRequestCount("delete", 542)), + )), + withComputedRequestCountTotals(), + ), + }, + { + name: "NumberOfUsersToReport", + nodeName: "node1", + expiredHour: 5, + countsToPersist: resource("test.v1.group", + withHour(3, + withUser("ana", "some-agent", withCounts("get", 101)), + withUser("bob", "some-agent", withCounts("get", 102)), + withUser("eva", "some-agent", withCounts("get", 103)), + withUser("gus", "some-agent", withCounts("get", 104)), + withUser("ivy", "some-agent", withCounts("get", 105)), + withUser("joe", "some-agent", withCounts("get", 106)), + withUser("lia", "some-agent", withCounts("get", 107)), + withUser("max", "some-agent", withCounts("get", 108)), + withUser("mia", "some-agent", withCounts("get", 109)), + withUser("rex", "some-agent", withCounts("get", 110)), + withUser("amy", "some-agent", withCounts("get", 100)), + withUser("zoe", "some-agent", withCounts("get", 111)), + ), + withHour(4, + withUser("mia", "some-agent", withCounts("delete", 1386)), + ), + ), + status: &apiv1.APIRequestCountStatus{}, + expected: apiRequestCountStatus( + withRequestLastHour( + withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + ), + ), + withRequestLast24hN("0-4,6-23", withPerNodeAPIRequestLog("node1")), + withRequestLast24h(3, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("zoe", "some-agent", withRequestCount("get", 111)), + withPerUserAPIRequestCount("rex", "some-agent", withRequestCount("get", 110)), + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("get", 109)), + withPerUserAPIRequestCount("max", "some-agent", withRequestCount("get", 108)), + withPerUserAPIRequestCount("lia", "some-agent", withRequestCount("get", 107)), + withPerUserAPIRequestCount("joe", "some-agent", withRequestCount("get", 106)), + withPerUserAPIRequestCount("ivy", "some-agent", withRequestCount("get", 105)), + withPerUserAPIRequestCount("gus", "some-agent", withRequestCount("get", 104)), + withPerUserAPIRequestCount("eva", "some-agent", withRequestCount("get", 103)), + withPerUserAPIRequestCount("bob", "some-agent", withRequestCount("get", 102)), + )), + withRequestLast24h(4, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "some-agent", withRequestCount("delete", 1386)), + )), + withComputedRequestCountTotals( + withAdditionalRequestCounts(3, "node1", 101), + withAdditionalRequestCounts(3, "node1", 100), + ), + ), + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + currentHour := tc.expiredHour - 1 + SetRequestCountsForNode(tc.nodeName, currentHour, tc.expiredHour, tc.countsToPersist)(10, tc.status) + assert.Equal(t, tc.expected, tc.status) + }) + } + +} + +func TestPersistRequestCountForAllResources(t *testing.T) { + + type logRequestFn func(*controller) + + 
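+ // a logRequestFn replays LogRequest calls against the controller, so each case can stage traffic before a sync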
testCases := []struct { + name string + currentHour int + existing []runtime.Object + requests []logRequestFn + expected []*apiv1.APIRequestCount + }{ + { + name: "Noop", + }, + { + name: "EmptyStatus", + existing: []runtime.Object{ + apiRequestCount("test.v1.group"), + }, + expected: []*apiv1.APIRequestCount{ + apiRequestCount("test.v1.group", withStatus( + withRequestLastHour(withPerNodeAPIRequestLog("node10")), + withRequestLast24hN("0,2-23", withPerNodeAPIRequestLog("node10")), + )), + }, + }, + { + name: "IgnoreInvalidResourceName", + existing: []runtime.Object{ + apiRequestCount("test-v1-invalid"), + apiRequestCount("test.v1.group"), + }, + expected: []*apiv1.APIRequestCount{ + apiRequestCount("test-v1-invalid"), + apiRequestCount("test.v1.group", withStatus( + withRequestLastHour(withPerNodeAPIRequestLog("node10")), + withRequestLast24hN("0,2-23", withPerNodeAPIRequestLog("node10")), + )), + }, + }, + { + name: "OnRestart", + existing: []runtime.Object{ + // current hour is 0, this api has not been requested since hour 20 + apiRequestCount("test.v1.group", + withStatus( + withRequestLastHour( + withPerNodeAPIRequestLog("node10", + withPerUserAPIRequestCount("user10", "agent10", withRequestCount("get", 100)), + ), + ), + withRequestLast24hN("*", withPerNodeAPIRequestLog("node10")), + withRequestLast24h(20, withPerNodeAPIRequestLog("node10", + withPerUserAPIRequestCount("user10", "agent10", withRequestCount("get", 100)), + )), + withComputedRequestCountTotals(), + ), + ), + // this api will have some current requests + apiRequestCount("test.v2.group"), + }, + requests: []logRequestFn{ + withRequestN("test.v2.group", 0, "user10", "agent10", "get", 53), + withRequestN("test.v3.group", 0, "user10", "agent10", "get", 57), + }, + expected: []*apiv1.APIRequestCount{ + apiRequestCount("test.v1.group", + withStatus( + withRequestLastHour(withPerNodeAPIRequestLog("node10")), + withRequestLast24hN("0,2-23", withPerNodeAPIRequestLog("node10")), + withRequestLast24h(20, withPerNodeAPIRequestLog("node10", + withPerUserAPIRequestCount("user10", "agent10", withRequestCount("get", 100)), + )), + withComputedRequestCountTotals(), + ), + ), + apiRequestCount("test.v2.group", + withStatus( + withRequestLastHour(withPerNodeAPIRequestLog("node10", + withPerUserAPIRequestCount("user10", "agent10", withRequestCount("get", 53)), + )), + withRequestLast24hN("0,2-23", withPerNodeAPIRequestLog("node10")), + withRequestLast24h(0, withPerNodeAPIRequestLog("node10", + withPerUserAPIRequestCount("user10", "agent10", withRequestCount("get", 53)), + )), + withComputedRequestCountTotals(), + ), + ), + apiRequestCount("test.v3.group", + withStatus( + withRequestLastHour(withPerNodeAPIRequestLog("node10", + withPerUserAPIRequestCount("user10", "agent10", withRequestCount("get", 57)), + )), + withRequestLast24hN("0,2-23", withPerNodeAPIRequestLog("node10")), + withRequestLast24h(0, withPerNodeAPIRequestLog("node10", + withPerUserAPIRequestCount("user10", "agent10", withRequestCount("get", 57)), + )), + withComputedRequestCountTotals(), + ), + ), + }, + }, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + c := NewController( + fake.NewSimpleClientset(tc.existing...).ApiserverV1().APIRequestCounts(), + "node10", + ) + c.updatePeriod = time.Millisecond + + for _, logRequest := range tc.requests { + logRequest(c) + } + c.persistRequestCountForAllResources(ctx, tc.currentHour) + + arcs, err := c.client.List(ctx, 
metav1.ListOptions{}) + if err != nil { + t.Fatal(err) + } + if len(arcs.Items) != len(tc.expected) { + t.Errorf("expected %d APIRequestCounts, got %d.", len(tc.expected), len(arcs.Items)) + } + + for _, expectedARC := range tc.expected { + actual, err := c.client.Get(ctx, expectedARC.Name, metav1.GetOptions{}) + if err != nil { + t.Error(err) + } + if !equality.Semantic.DeepEqual(expectedARC, actual) { + t.Error(cmp.Diff(expectedARC, actual)) + } + } + }) + } + + t.Run("Deleted", func(t *testing.T) { + + // "start" controller + c := NewController( + fake.NewSimpleClientset().ApiserverV1().APIRequestCounts(), + "node10", + ) + c.updatePeriod = time.Millisecond + + // log requests + withRequest("test.v1.group", 0, "user10", "agent10", "get")(c) + withRequest("test.v2.group", 0, "user10", "agent10", "get")(c) + withRequest("test.v3.group", 0, "user10", "agent10", "get")(c) + + // sync + c.persistRequestCountForAllResources(ctx, 0) + + // assert apirequestcounts created + for _, n := range []string{"test.v1.group", "test.v2.group", "test.v3.group"} { + if _, err := c.client.Get(ctx, n, metav1.GetOptions{}); err != nil { + t.Fatalf("Expected APIRequestCount %s: %s", n, err) + } + } + + // delete an apirequestcount + deleted := "test.v2.group" + if err := c.client.Delete(ctx, deleted, metav1.DeleteOptions{}); err != nil { + t.Fatalf("Unable to delete APIRequestCount %s: %s", deleted, err) + } + + // log requests + withRequest("test.v1.group", 1, "user11", "agent11", "get")(c) + withRequest("test.v3.group", 1, "user11", "agent11", "get")(c) + + // sync + c.persistRequestCountForAllResources(ctx, 1) + + // assert deleted apirequestcounts not re-created + if _, err := c.client.Get(ctx, deleted, metav1.GetOptions{}); err == nil { + t.Fatalf("Did not expect to find deleted APIRequestCount %s.", deleted) + } + + }) + + t.Run("24HourLogExpiration", func(t *testing.T) { + + // "start" controller + c := NewController( + fake.NewSimpleClientset().ApiserverV1().APIRequestCounts(), + "node10", + ) + c.updatePeriod = time.Millisecond + + // log 24 hrs of requests + for i := 0; i < 24; i++ { + suffix := fmt.Sprintf("%02d", i) + withRequest("test.v1.group", i, "user"+suffix, "agent"+suffix, "get")(c) + } + + // sync + c.persistRequestCountForAllResources(ctx, 0) + + // assert apirequestcounts created + actual, err := c.client.Get(ctx, "test.v1.group", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected APIRequestCount %s: %s", "test.v1.group", err) + } + + expectedCounts := []int64{1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} + + // assert expected counts + if actual.Status.CurrentHour.RequestCount != 1 { + t.Fatalf("%02d: CH: expected requestCount: %d, actual: %d", 0, 1, actual.Status.CurrentHour.RequestCount) + } + for i := range actual.Status.Last24h { + if actual.Status.Last24h[i].RequestCount != expectedCounts[i] { + t.Fatalf("%02d: %02d: expected requestCount: %d, actual: %d", 0, i, expectedCounts[i], actual.Status.Last24h[i].RequestCount) + } + } + + // sync 24 hrs + for i := 1; i < 24; i++ { + c.persistRequestCountForAllResources(ctx, i) + + // next hour should be clear + expectedCounts[(i+1)%24] = 0 + + actual, err = c.client.Get(ctx, "test.v1.group", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected APIRequestCount %s: %s", "test.v1.group", err) + } + // assert expected counts + if actual.Status.CurrentHour.RequestCount != 0 { + t.Fatalf("%02d: CH: expected requestCount: %d, actual: %d", i, 0, actual.Status.CurrentHour.RequestCount) + } + for h :=
range actual.Status.Last24h { + if actual.Status.Last24h[h].RequestCount != expectedCounts[h] { + t.Fatalf("%02d: %02d: expected requestCount: %d, actual: %d", i, h, expectedCounts[h], actual.Status.Last24h[h].RequestCount) + } + } + } + }) + +} +func withRequestN(resource string, hour int, user, agent, verb string, n int) func(*controller) { + f := withRequest(resource, hour, user, agent, verb) + return func(c *controller) { + for i := 0; i < n; i++ { + f(c) + } + } +} + +func withRequest(resource string, hour int, user, agent, verb string) func(*controller) { + ts := time.Date(2021, 11, 9, hour, 0, 0, 0, time.UTC) + return func(c *controller) { + gvr, err := apirequestcount.NameToResource(resource) + if err != nil { + panic(err) + } + c.LogRequest(gvr, ts, user, agent, verb) + } +} + +func withPerUserAPIRequestCount(user, userAgent string, options ...func(*apiv1.PerUserAPIRequestCount)) func(*apiv1.PerNodeAPIRequestLog) { + return func(nodeRequestLog *apiv1.PerNodeAPIRequestLog) { + requestUser := &apiv1.PerUserAPIRequestCount{ + UserName: user, + UserAgent: userAgent, + } + for _, f := range options { + f(requestUser) + } + nodeRequestLog.ByUser = append(nodeRequestLog.ByUser, *requestUser) + } +} + +func withRequestCount(verb string, count int64) func(user *apiv1.PerUserAPIRequestCount) { + return func(requestUser *apiv1.PerUserAPIRequestCount) { + requestCount := apiv1.PerVerbAPIRequestCount{Verb: verb, RequestCount: count} + requestUser.ByVerb = append(requestUser.ByVerb, requestCount) + requestUser.RequestCount += count + } +} + +func withAdditionalRequestCounts(hour int, node string, counts int) func(map[int]map[string]int64) { + return func(m map[int]map[string]int64) { + if _, ok := m[hour]; !ok { + m[hour] = map[string]int64{} + } + m[hour][node] = m[hour][node] + int64(counts) + } +} + +func withComputedRequestCountTotals(options ...func(map[int]map[string]int64)) func(*apiv1.APIRequestCountStatus) { + additionalCounts := map[int]map[string]int64{} + for _, f := range options { + f(additionalCounts) + } + return func(status *apiv1.APIRequestCountStatus) { + totalForDay := int64(0) + for hourIndex, hourlyCount := range status.Last24h { + totalForHour := int64(0) + for nodeIndex, nodeCount := range hourlyCount.ByNode { + totalForNode := int64(0) + for _, userCount := range nodeCount.ByUser { + totalForNode += userCount.RequestCount + } + totalForNode += additionalCounts[hourIndex][nodeCount.NodeName] + // only set the perNode count if it is not set already + if status.Last24h[hourIndex].ByNode[nodeIndex].RequestCount == 0 { + status.Last24h[hourIndex].ByNode[nodeIndex].RequestCount = totalForNode + } + totalForHour += status.Last24h[hourIndex].ByNode[nodeIndex].RequestCount + } + status.Last24h[hourIndex].RequestCount = totalForHour + totalForDay += totalForHour + } + status.RequestCount = totalForDay + + totalForCurrentHour := int64(0) + for nodeIndex, nodeCount := range status.CurrentHour.ByNode { + totalForNode := int64(0) + for _, userCount := range nodeCount.ByUser { + totalForNode += userCount.RequestCount + } + // only set the perNode count if it is not set already + if status.CurrentHour.ByNode[nodeIndex].RequestCount == 0 { + status.CurrentHour.ByNode[nodeIndex].RequestCount = totalForNode + } + totalForCurrentHour += status.CurrentHour.ByNode[nodeIndex].RequestCount + } + status.CurrentHour.RequestCount = totalForCurrentHour + } +} + +func apiRequestCount(n string, options ...func(*apiv1.APIRequestCount)) *apiv1.APIRequestCount { + arc := &apiv1.APIRequestCount{ + 
ObjectMeta: metav1.ObjectMeta{Name: n}, + Spec: apiv1.APIRequestCountSpec{NumberOfUsersToReport: 10}, + } + for _, f := range options { + f(arc) + } + return arc +} + +func withStatus(options ...func(*apiv1.APIRequestCountStatus)) func(*apiv1.APIRequestCount) { + return func(arc *apiv1.APIRequestCount) { + arc.Status = *apiRequestCountStatus(options...) + } +} + +func apiRequestCountStatus(options ...func(*apiv1.APIRequestCountStatus)) *apiv1.APIRequestCountStatus { + status := &apiv1.APIRequestCountStatus{} + for _, f := range options { + f(status) + } + return status +} + +func requestLog(options ...func(*apiv1.PerResourceAPIRequestLog)) apiv1.PerResourceAPIRequestLog { + requestLog := &apiv1.PerResourceAPIRequestLog{} + for _, f := range options { + f(requestLog) + } + return *requestLog +} + +func withRequestLastHour(options ...func(*apiv1.PerResourceAPIRequestLog)) func(*apiv1.APIRequestCountStatus) { + return func(status *apiv1.APIRequestCountStatus) { + status.CurrentHour = requestLog(options...) + } +} + +func withRequestLast24hN(hours string, options ...func(*apiv1.PerResourceAPIRequestLog)) func(*apiv1.APIRequestCountStatus) { + var hrs []int + for _, s := range strings.Split(hours, ",") { + from, to := 0, 23 + var err error + switch { + case s == "*": + case strings.Contains(s, "-"): + rs := strings.Split(s, "-") + if from, err = strconv.Atoi(rs[0]); err != nil { + panic(err) + } + if to, err = strconv.Atoi(rs[1]); err != nil { + panic(err) + } + default: + if from, err = strconv.Atoi(s); err != nil { + panic(err) + } + to = from + } + for i := from; i <= to; i++ { + hrs = append(hrs, i) + } + } + sort.Ints(hrs) + var fns []func(*apiv1.APIRequestCountStatus) + for _, h := range hrs { + fns = append(fns, withRequestLast24h(h, options...)) + } + return func(status *apiv1.APIRequestCountStatus) { + for _, f := range fns { + f(status) + } + } +} + +func withRequestLast24h(hour int, options ...func(*apiv1.PerResourceAPIRequestLog)) func(*apiv1.APIRequestCountStatus) { + return func(status *apiv1.APIRequestCountStatus) { + if status.Last24h == nil { + status.Last24h = make([]apiv1.PerResourceAPIRequestLog, 24) + } + status.Last24h[hour] = requestLog(options...) 
+ } +} + +func withPerNodeAPIRequestLog(node string, options ...func(*apiv1.PerNodeAPIRequestLog)) func(*apiv1.PerResourceAPIRequestLog) { + return func(log *apiv1.PerResourceAPIRequestLog) { + nodeRequestLog := &apiv1.PerNodeAPIRequestLog{NodeName: node} + for _, f := range options { + f(nodeRequestLog) + } + log.ByNode = append(log.ByNode, *nodeRequestLog) + } +} + +func withPerNodeRequestCount(requestCount int64) func(*apiv1.PerNodeAPIRequestLog) { + return func(log *apiv1.PerNodeAPIRequestLog) { + log.RequestCount = requestCount + } +} + +func cluster(options ...func(*clusterRequestCounts)) *clusterRequestCounts { + c := &clusterRequestCounts{nodeToRequestCount: map[string]*apiRequestCounts{}} + for _, f := range options { + f(c) + } + return c +} + +func withNode(name string, options ...func(counts *apiRequestCounts)) func(*clusterRequestCounts) { + return func(c *clusterRequestCounts) { + n := &apiRequestCounts{ + nodeName: name, + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{}, + } + for _, f := range options { + f(n) + } + c.nodeToRequestCount[name] = n + } +} + +func resource(resource string, options ...func(counts *resourceRequestCounts)) *resourceRequestCounts { + gvr := gvr(resource) + r := &resourceRequestCounts{ + resource: gvr, + hourToRequestCount: make(map[int]*hourlyRequestCounts, 24), + } + for _, f := range options { + f(r) + } + return r +} + +func withResource(r string, options ...func(counts *resourceRequestCounts)) func(*apiRequestCounts) { + gvr := gvr(r) + return func(n *apiRequestCounts) { + n.resourceToRequestCount[gvr] = resource(r, options...) + } +} + +func withHour(hour int, options ...func(counts *hourlyRequestCounts)) func(counts *resourceRequestCounts) { + return func(r *resourceRequestCounts) { + h := &hourlyRequestCounts{ + usersToRequestCounts: map[userKey]*userRequestCounts{}, + } + for _, f := range options { + f(h) + } + r.hourToRequestCount[hour] = h + } +} + +func withCountToSuppress(countToSuppress int64) func(counts *hourlyRequestCounts) { + return func(h *hourlyRequestCounts) { + h.countToSuppress = countToSuppress + } +} + +func withUser(user, userAgent string, options ...func(*userRequestCounts)) func(counts *hourlyRequestCounts) { + return func(h *hourlyRequestCounts) { + u := &userRequestCounts{ + user: userKey{ + user: user, + userAgent: userAgent, + }, + verbsToRequestCounts: map[string]*verbRequestCount{}, + } + for _, f := range options { + f(u) + } + h.usersToRequestCounts[u.user] = u + } +} + +func withCounts(verb string, count int64) func(*userRequestCounts) { + return func(u *userRequestCounts) { + u.verbsToRequestCounts[verb] = &verbRequestCount{count: count} + } +} + +func Test_removePersistedRequestCounts(t *testing.T) { + + type args struct { + nodeName string + currentHour int + persistedStatus *apiv1.APIRequestCountStatus + localResourceCount *resourceRequestCounts + } + tests := []struct { + name string + args args + expected *resourceRequestCounts + }{ + { + name: "other-hours-gone", + args: args{ + nodeName: "node1", + currentHour: 6, + persistedStatus: apiRequestCountStatus( + withRequestLastHour(withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "mia-agent", withRequestCount("delete", 1386)), + withPerUserAPIRequestCount("eva", "eva-agent", withRequestCount("get", 725), withRequestCount("watch", 640)), + )), + withRequestLast24h(4, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("eva", "eva-agent", withRequestCount("get", 625), withRequestCount("watch", 
540)), + )), + withRequestLast24h(5, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "mia-agent", withRequestCount("delete", 1386)), + withPerUserAPIRequestCount("eva", "eva-agent", withRequestCount("get", 725), withRequestCount("watch", 640)), + )), + withComputedRequestCountTotals(), + ), + localResourceCount: resource("test.v1.group", + withHour(4, + withUser("bob", "bob-agent", withCounts("get", 41), withCounts("watch", 63)), + ), + withHour(5, + withUser("mia", "mia-agent", withCounts("delete", 712)), + ), + ), + }, + expected: resource("test.v1.group", + withHour(6), + ), + }, + { + name: "remove persisted user, keep non-persisted user", + args: args{ + nodeName: "node1", + currentHour: 5, + persistedStatus: apiRequestCountStatus( + withRequestLastHour(withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "mia-agent", withRequestCount("delete", 1386)), + withPerUserAPIRequestCount("eva", "eva-agent", withRequestCount("get", 725), withRequestCount("watch", 640)), + )), + withRequestLast24h(4, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("eva", "eva-agent", withRequestCount("get", 625), withRequestCount("watch", 540)), + )), + withRequestLast24h(5, withPerNodeAPIRequestLog("node1", + withPerUserAPIRequestCount("mia", "mia-agent", withRequestCount("delete", 1386)), + withPerUserAPIRequestCount("eva", "eva-agent", withRequestCount("get", 725), withRequestCount("watch", 640)), + )), + withComputedRequestCountTotals(), + ), + localResourceCount: resource("test.v1.group", + withHour(4, + withUser("bob", "bob-agent", withCounts("get", 41), withCounts("watch", 63)), + ), + withHour(5, + withUser("mark", "mark-agent", withCounts("delete", 5)), + withUser("mia", "mia-agent", withCounts("delete", 712)), + ), + ), + }, + expected: resource("test.v1.group", + withHour(5, + withCountToSuppress(5), + withUser("mark", "mark-agent", withCounts("delete", 5)), + ), + ), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + removePersistedRequestCounts(tt.args.nodeName, tt.args.currentHour, tt.args.persistedStatus, tt.args.localResourceCount) + if !tt.expected.Equals(tt.args.localResourceCount) { + t.Error(diff.StringDiff(tt.expected.String(), tt.args.localResourceCount.String())) + } + }) + } +} diff --git a/openshift-kube-apiserver/filters/apirequestcount/deprecated.go b/openshift-kube-apiserver/filters/apirequestcount/deprecated.go new file mode 100644 index 0000000000000..66519b98df9b7 --- /dev/null +++ b/openshift-kube-apiserver/filters/apirequestcount/deprecated.go @@ -0,0 +1,70 @@ +package apirequestcount + +import "k8s.io/apimachinery/pkg/runtime/schema" + +var deprecatedApiRemovedRelease = map[schema.GroupVersionResource]string{ + // Kubernetes APIs + {Group: "apps", Version: "v1beta1", Resource: "controllerrevisions"}: "1.16", + {Group: "apps", Version: "v1beta1", Resource: "deploymentrollbacks"}: "1.16", + {Group: "apps", Version: "v1beta1", Resource: "deployments"}: "1.16", + {Group: "apps", Version: "v1beta1", Resource: "scales"}: "1.16", + {Group: "apps", Version: "v1beta1", Resource: "statefulsets"}: "1.16", + {Group: "apps", Version: "v1beta2", Resource: "controllerrevisions"}: "1.16", + {Group: "apps", Version: "v1beta2", Resource: "daemonsets"}: "1.16", + {Group: "apps", Version: "v1beta2", Resource: "deployments"}: "1.16", + {Group: "apps", Version: "v1beta2", Resource: "replicasets"}: "1.16", + {Group: "apps", Version: "v1beta2", Resource: "scales"}: "1.16", + {Group: "apps", Version: "v1beta2", Resource: 
"statefulsets"}: "1.16", + {Group: "extensions", Version: "v1beta1", Resource: "daemonsets"}: "1.16", + {Group: "extensions", Version: "v1beta1", Resource: "deploymentrollbacks"}: "1.16", + {Group: "extensions", Version: "v1beta1", Resource: "deployments"}: "1.16", + {Group: "extensions", Version: "v1beta1", Resource: "networkpolicies"}: "1.16", + {Group: "extensions", Version: "v1beta1", Resource: "podsecuritypolicies"}: "1.16", + {Group: "extensions", Version: "v1beta1", Resource: "replicasets"}: "1.16", + {Group: "extensions", Version: "v1beta1", Resource: "scales"}: "1.16", + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Resource: "flowschemas"}: "1.21", + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Resource: "prioritylevelconfigurations"}: "1.21", + {Group: "admissionregistration.k8s.io", Version: "v1beta1", Resource: "mutatingwebhookconfigurations"}: "1.22", + {Group: "admissionregistration.k8s.io", Version: "v1beta1", Resource: "validatingwebhookconfigurations"}: "1.22", + {Group: "apiextensions.k8s.io", Version: "v1beta1", Resource: "customresourcedefinitions"}: "1.22", + {Group: "apiregistration.k8s.io", Version: "v1beta1", Resource: "apiservices"}: "1.22", + {Group: "authentication.k8s.io", Version: "v1beta1", Resource: "tokenreviews"}: "1.22", + {Group: "authorization.k8s.io", Version: "v1beta1", Resource: "localsubjectaccessreviews"}: "1.22", + {Group: "authorization.k8s.io", Version: "v1beta1", Resource: "selfsubjectaccessreviews"}: "1.22", + {Group: "authorization.k8s.io", Version: "v1beta1", Resource: "selfsubjectrulesreviews"}: "1.22", + {Group: "authorization.k8s.io", Version: "v1beta1", Resource: "subjectaccessreviews"}: "1.22", + {Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}: "1.22", + {Group: "coordination.k8s.io", Version: "v1beta1", Resource: "leases"}: "1.22", + {Group: "extensions", Version: "v1beta1", Resource: "ingresses"}: "1.22", + {Group: "networking.k8s.io", Version: "v1beta1", Resource: "ingresses"}: "1.22", + {Group: "networking.k8s.io", Version: "v1beta1", Resource: "ingressclasses"}: "1.22", + {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterrolebindings"}: "1.22", + {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterroles"}: "1.22", + {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "rolebindings"}: "1.22", + {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"}: "1.22", + {Group: "scheduling.k8s.io", Version: "v1beta1", Resource: "priorityclasses"}: "1.22", + {Group: "storage.k8s.io", Version: "v1beta1", Resource: "csidrivers"}: "1.22", + {Group: "storage.k8s.io", Version: "v1beta1", Resource: "csinodes"}: "1.22", + {Group: "storage.k8s.io", Version: "v1beta1", Resource: "storageclasses"}: "1.22", + {Group: "storage.k8s.io", Version: "v1beta1", Resource: "volumeattachments"}: "1.22", + {Group: "batch", Version: "v1beta1", Resource: "cronjobs"}: "1.25", + {Group: "discovery.k8s.io", Version: "v1beta1", Resource: "endpointslices"}: "1.25", + {Group: "events.k8s.io", Version: "v1beta1", Resource: "events"}: "1.25", + {Group: "autoscaling", Version: "v2beta1", Resource: "horizontalpodautoscalers"}: "1.25", + {Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"}: "1.25", + {Group: "policy", Version: "v1beta1", Resource: "podsecuritypolicies"}: "1.25", + {Group: "node.k8s.io", Version: "v1beta1", Resource: "runtimeclasses"}: "1.25", + {Group: "autoscaling", Version: "v2beta2", 
Resource: "horizontalpodautoscalers"}: "1.26", + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Resource: "flowschemas"}: "1.26", + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Resource: "prioritylevelconfigurations"}: "1.26", + {Group: "storage.k8s.io", Version: "v1beta1", Resource: "csistoragecapacities"}: "1.27", + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Resource: "flowschemas"}: "1.29", + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Resource: "prioritylevelconfigurations"}: "1.29", + // OpenShift APIs + {Group: "operator.openshift.io", Version: "v1beta1", Resource: "kubedeschedulers"}: "1.22", +} + +// removedRelease of a specified resource.version.group. +func removedRelease(resource schema.GroupVersionResource) string { + return deprecatedApiRemovedRelease[resource] +} diff --git a/openshift-kube-apiserver/filters/apirequestcount/request_counts.go b/openshift-kube-apiserver/filters/apirequestcount/request_counts.go new file mode 100644 index 0000000000000..3f1d88b33a081 --- /dev/null +++ b/openshift-kube-apiserver/filters/apirequestcount/request_counts.go @@ -0,0 +1,442 @@ +package apirequestcount + +import ( + "fmt" + "sort" + "strings" + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +type clusterRequestCounts struct { + lock sync.RWMutex + nodeToRequestCount map[string]*apiRequestCounts +} + +func newClusterRequestCounts() *clusterRequestCounts { + return &clusterRequestCounts{ + nodeToRequestCount: map[string]*apiRequestCounts{}, + } +} + +func (c *clusterRequestCounts) Node(nodeName string) *apiRequestCounts { + c.lock.RLock() + ret, ok := c.nodeToRequestCount[nodeName] + c.lock.RUnlock() + if ok { + return ret + } + + c.lock.Lock() + defer c.lock.Unlock() + if _, ok := c.nodeToRequestCount[nodeName]; !ok { + c.nodeToRequestCount[nodeName] = newAPIRequestCounts(nodeName) + } + return c.nodeToRequestCount[nodeName] +} + +func (c *clusterRequestCounts) IncrementRequestCount(node string, resource schema.GroupVersionResource, hour int, user userKey, verb string, count int64) { + c.Node(node).IncrementRequestCount(resource, hour, user, verb, count) +} + +func (c *clusterRequestCounts) String() string { + c.lock.RLock() + defer c.lock.RUnlock() + + mapStrings := []string{} + for _, k := range sets.StringKeySet(c.nodeToRequestCount).List() { + mapStrings = append(mapStrings, fmt.Sprintf("%q: %v", k, c.nodeToRequestCount[k])) + } + return fmt.Sprintf("nodeToRequestCount: {%v}", strings.Join(mapStrings, ", ")) +} + +type apiRequestCounts struct { + lock sync.RWMutex + nodeName string + resourceToRequestCount map[schema.GroupVersionResource]*resourceRequestCounts +} + +func newAPIRequestCounts(nodeName string) *apiRequestCounts { + return &apiRequestCounts{ + nodeName: nodeName, + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{}, + } +} + +func (c *apiRequestCounts) Resource(resource schema.GroupVersionResource) *resourceRequestCounts { + c.lock.RLock() + ret, ok := c.resourceToRequestCount[resource] + c.lock.RUnlock() + if ok { + return ret + } + + c.lock.Lock() + defer c.lock.Unlock() + if _, ok := c.resourceToRequestCount[resource]; !ok { + c.resourceToRequestCount[resource] = newResourceRequestCounts(resource) + } + return c.resourceToRequestCount[resource] +} + +func (c *apiRequestCounts) Add(requestCounts *apiRequestCounts) { + for resource := range requestCounts.resourceToRequestCount { + 
c.Resource(resource).Add(requestCounts.Resource(resource)) + } +} + +func (c *apiRequestCounts) IncrementRequestCount(resource schema.GroupVersionResource, hour int, user userKey, verb string, count int64) { + c.Resource(resource).IncrementRequestCount(hour, user, verb, count) +} + +func (c *apiRequestCounts) ExpireOldestCounts(expiredHour int) { + c.lock.Lock() + defer c.lock.Unlock() + for _, resource := range c.resourceToRequestCount { + resource.ExpireOldestCounts(expiredHour) + } +} + +func (c *apiRequestCounts) RemoveResource(resource schema.GroupVersionResource) { + c.lock.Lock() + defer c.lock.Unlock() + delete(c.resourceToRequestCount, resource) +} + +func (c *apiRequestCounts) Equals(rhs *apiRequestCounts) bool { + if c.nodeName != rhs.nodeName { + return false + } + + c.lock.RLock() + defer c.lock.RUnlock() + rhs.lock.RLock() + defer rhs.lock.RUnlock() + + if len(c.resourceToRequestCount) != len(rhs.resourceToRequestCount) { + return false + } + + for k, lhsV := range c.resourceToRequestCount { + rhsV, ok := rhs.resourceToRequestCount[k] + if !ok { + return false + } + if !lhsV.Equals(rhsV) { + return false + } + } + return true +} + +func (c *apiRequestCounts) String() string { + c.lock.RLock() + defer c.lock.RUnlock() + + lookup := map[string]schema.GroupVersionResource{} + for k := range c.resourceToRequestCount { + lookup[k.String()] = k + } + mapStrings := []string{} + for _, k := range sets.StringKeySet(lookup).List() { + mapStrings = append(mapStrings, fmt.Sprintf("%q: %v", k, c.resourceToRequestCount[lookup[k]])) + } + return fmt.Sprintf("resource: %v, resourceToRequestCount: {%v}", c.resourceToRequestCount, strings.Join(mapStrings, ", ")) +} + +type resourceRequestCounts struct { + lock sync.RWMutex + resource schema.GroupVersionResource + hourToRequestCount map[int]*hourlyRequestCounts +} + +func newResourceRequestCounts(resource schema.GroupVersionResource) *resourceRequestCounts { + return &resourceRequestCounts{ + resource: resource, + hourToRequestCount: map[int]*hourlyRequestCounts{}, + } +} + +func (c *resourceRequestCounts) Hour(hour int) *hourlyRequestCounts { + c.lock.RLock() + ret, ok := c.hourToRequestCount[hour] + c.lock.RUnlock() + if ok { + return ret + } + + c.lock.Lock() + defer c.lock.Unlock() + if _, ok := c.hourToRequestCount[hour]; !ok { + c.hourToRequestCount[hour] = newHourlyRequestCounts() + } + return c.hourToRequestCount[hour] +} + +func (c *resourceRequestCounts) ExpireOldestCounts(expiredHour int) { + c.lock.Lock() + defer c.lock.Unlock() + delete(c.hourToRequestCount, expiredHour) +} + +func (c *resourceRequestCounts) Add(requestCounts *resourceRequestCounts) { + for hour, hourCount := range requestCounts.hourToRequestCount { + c.Hour(hour).Add(hourCount) + } +} + +func (c *resourceRequestCounts) IncrementRequestCount(hour int, user userKey, verb string, count int64) { + c.Hour(hour).IncrementRequestCount(user, verb, count) +} + +func (c *resourceRequestCounts) RemoveHour(hour int) { + c.lock.Lock() + defer c.lock.Unlock() + delete(c.hourToRequestCount, hour) +} + +func (c *resourceRequestCounts) Equals(rhs *resourceRequestCounts) bool { + if c.resource != rhs.resource { + return false + } + + c.lock.RLock() + defer c.lock.RUnlock() + rhs.lock.RLock() + defer rhs.lock.RUnlock() + + if len(c.hourToRequestCount) != len(rhs.hourToRequestCount) { + return false + } + + for k, lhsV := range c.hourToRequestCount { + rhsV, ok := rhs.hourToRequestCount[k] + if !ok { + return false + } + if !lhsV.Equals(rhsV) { + return false + } + } + return true +} 
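The accessor methods in this file (Node, Resource, Hour, and the User and Verb accessors below) all follow the same double-checked locking idiom: an optimistic lookup under a read lock, then a write lock with a re-check before inserting, so concurrent callers never create duplicate buckets. A minimal standalone sketch of that idiom, using hypothetical counterSet and bucket names that are not part of this patch:

package main

import (
	"fmt"
	"sync"
)

type bucket struct{ n int64 }

type counterSet struct {
	lock    sync.RWMutex
	buckets map[string]*bucket
}

// Bucket returns the bucket for key, creating it on first use.
func (c *counterSet) Bucket(key string) *bucket {
	// Fast path: a read lock is enough when the bucket already exists.
	c.lock.RLock()
	b, ok := c.buckets[key]
	c.lock.RUnlock()
	if ok {
		return b
	}
	// Slow path: take the write lock and re-check, because another
	// goroutine may have created the bucket between the two locks.
	c.lock.Lock()
	defer c.lock.Unlock()
	if _, ok := c.buckets[key]; !ok {
		c.buckets[key] = &bucket{}
	}
	return c.buckets[key]
}

func main() {
	c := &counterSet{buckets: map[string]*bucket{}}
	c.Bucket("pods").n++
	fmt.Println(c.Bucket("pods").n) // prints 1
}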
+ +func (c *resourceRequestCounts) String() string { + c.lock.RLock() + defer c.lock.RUnlock() + + mapStrings := []string{} + for _, k := range sets.IntKeySet(c.hourToRequestCount).List() { + mapStrings = append(mapStrings, fmt.Sprintf("%d: %v", k, c.hourToRequestCount[k].String())) + } + return fmt.Sprintf("resource: %v, hourToRequestCount: {%v}", c.resource, strings.Join(mapStrings, ", ")) +} + +type hourlyRequestCounts struct { + lock sync.RWMutex + // countToSuppress is the number of requests to remove from the count to avoid double counting in persistence + // TODO I think I'd like this in look-aside data, but I don't see an easy way to plumb it. + countToSuppress int64 + usersToRequestCounts map[userKey]*userRequestCounts +} + +func newHourlyRequestCounts() *hourlyRequestCounts { + return &hourlyRequestCounts{ + usersToRequestCounts: map[userKey]*userRequestCounts{}, + } +} + +func (c *hourlyRequestCounts) User(user userKey) *userRequestCounts { + c.lock.RLock() + ret, ok := c.usersToRequestCounts[user] + c.lock.RUnlock() + if ok { + return ret + } + + c.lock.Lock() + defer c.lock.Unlock() + if _, ok := c.usersToRequestCounts[user]; !ok { + c.usersToRequestCounts[user] = newUserRequestCounts(user) + } + return c.usersToRequestCounts[user] +} + +func (c *hourlyRequestCounts) Add(requestCounts *hourlyRequestCounts) { + for user, userCount := range requestCounts.usersToRequestCounts { + c.User(user).Add(userCount) + } + c.countToSuppress += requestCounts.countToSuppress +} + +func (c *hourlyRequestCounts) IncrementRequestCount(user userKey, verb string, count int64) { + c.User(user).IncrementRequestCount(verb, count) +} + +func (c *hourlyRequestCounts) RemoveUser(user userKey) { + c.lock.Lock() + defer c.lock.Unlock() + delete(c.usersToRequestCounts, user) +} + +func (c *hourlyRequestCounts) Equals(rhs *hourlyRequestCounts) bool { + c.lock.RLock() + defer c.lock.RUnlock() + rhs.lock.RLock() + defer rhs.lock.RUnlock() + + if c.countToSuppress != rhs.countToSuppress { + return false + } + + if len(c.usersToRequestCounts) != len(rhs.usersToRequestCounts) { + return false + } + + for k, lhsV := range c.usersToRequestCounts { + rhsV, ok := rhs.usersToRequestCounts[k] + if !ok { + return false + } + if !lhsV.Equals(rhsV) { + return false + } + } + return true +} + +func (c *hourlyRequestCounts) String() string { + c.lock.RLock() + defer c.lock.RUnlock() + + keys := []userKey{} + for k := range c.usersToRequestCounts { + keys = append(keys, k) + } + sort.Sort(byUserKey(keys)) + + mapStrings := []string{} + for _, k := range keys { + mapStrings = append(mapStrings, fmt.Sprintf("%q: %v", k, c.usersToRequestCounts[k].String())) + } + return fmt.Sprintf("countToSuppress=%d usersToRequestCounts: {%v}", c.countToSuppress, strings.Join(mapStrings, ", ")) +} + +type userKey struct { + user string + userAgent string +} + +type byUserKey []userKey + +func (s byUserKey) Len() int { + return len(s) +} +func (s byUserKey) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s byUserKey) Less(i, j int) bool { + userEquals := strings.Compare(s[i].user, s[j].user) + if userEquals != 0 { + return userEquals < 0 + } + return strings.Compare(s[i].userAgent, s[j].userAgent) < 0 +} + +type userRequestCounts struct { + lock sync.RWMutex + user userKey + verbsToRequestCounts map[string]*verbRequestCount +} + +func newUserRequestCounts(user userKey) *userRequestCounts { + return &userRequestCounts{ + user: user, + verbsToRequestCounts: map[string]*verbRequestCount{}, + } +} + +func (c *userRequestCounts) Verb(verb 
string) *verbRequestCount { + c.lock.RLock() + ret, ok := c.verbsToRequestCounts[verb] + c.lock.RUnlock() + if ok { + return ret + } + + c.lock.Lock() + defer c.lock.Unlock() + if _, ok := c.verbsToRequestCounts[verb]; !ok { + c.verbsToRequestCounts[verb] = &verbRequestCount{} + } + return c.verbsToRequestCounts[verb] +} + +func (c *userRequestCounts) Add(requestCounts *userRequestCounts) { + for verb := range requestCounts.verbsToRequestCounts { + c.Verb(verb).Add(requestCounts.Verb(verb).count) + } +} + +func (c *userRequestCounts) IncrementRequestCount(verb string, count int64) { + c.Verb(verb).IncrementRequestCount(count) +} + +func (c *userRequestCounts) Equals(rhs *userRequestCounts) bool { + if c.user != rhs.user { + return false + } + + c.lock.RLock() + defer c.lock.RUnlock() + rhs.lock.RLock() + defer rhs.lock.RUnlock() + + if len(c.verbsToRequestCounts) != len(rhs.verbsToRequestCounts) { + return false + } + + for k, lhsV := range c.verbsToRequestCounts { + rhsV, ok := rhs.verbsToRequestCounts[k] + if !ok { + return false + } + if !lhsV.Equals(rhsV) { + return false + } + } + return true +} + +func (c *userRequestCounts) String() string { + c.lock.RLock() + defer c.lock.RUnlock() + + mapStrings := []string{} + for _, k := range sets.StringKeySet(c.verbsToRequestCounts).List() { + mapStrings = append(mapStrings, fmt.Sprintf("%q: %v", k, c.verbsToRequestCounts[k])) + } + return fmt.Sprintf("user: %q, userAgent: %q, verbsToRequestCounts: {%v}", c.user.user, c.user.userAgent, strings.Join(mapStrings, ", ")) +} + +type verbRequestCount struct { + count int64 +} + +func (c *verbRequestCount) Add(count int64) { + atomic.AddInt64(&c.count, count) +} + +func (c *verbRequestCount) IncrementRequestCount(count int64) { + c.Add(count) +} + +func (c *verbRequestCount) Equals(rhs *verbRequestCount) bool { + lhsV := atomic.LoadInt64(&c.count) + rhsV := atomic.LoadInt64(&rhs.count) + return lhsV == rhsV +} diff --git a/openshift-kube-apiserver/filters/apirequestcount/request_counts_test.go b/openshift-kube-apiserver/filters/apirequestcount/request_counts_test.go new file mode 100644 index 0000000000000..dc389d8c27cdc --- /dev/null +++ b/openshift-kube-apiserver/filters/apirequestcount/request_counts_test.go @@ -0,0 +1,294 @@ +package apirequestcount + +import ( + "strings" + "testing" + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" +) + +func gvr(resource string) schema.GroupVersionResource { + s := strings.SplitN(resource, ".", 3) + switch len(s) { + case 3: + return schema.GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]} + case 2: + return schema.GroupVersionResource{Version: s[1], Resource: s[0]} + case 1: + return schema.GroupVersionResource{Resource: s[0]} + } + panic(s) +} + +var ( + bobKey = userKey{ + user: "bob", + userAgent: "some-agent", + } + sueKey = userKey{ + user: "sue", + userAgent: "some-agent", + } + genericUserKey = userKey{ + user: "user", + userAgent: "some-agent", + } +) + +func TestAPIRequestCounts_IncrementRequestCount(t *testing.T) { + testCases := []struct { + resource schema.GroupVersionResource + ts time.Time + user userKey + verb string + count int64 + }{ + {gvr("test.v1.group"), testTime(0, 0), bobKey, "get", 1}, + {gvr("test.v1.group"), testTime(0, 1), bobKey, "list", 2}, + {gvr("test.v1.group"), testTime(1, 0), bobKey, "get", 1}, + {gvr("test.v2.group"), testTime(2, 0), bobKey, "get", 1}, + {gvr("test.v2.group"), testTime(2, 1), sueKey, "list", 2}, + {gvr("test.v2.group"), testTime(2, 2), sueKey, "get", 
1}, + {gvr("test.v2.group"), testTime(2, 3), sueKey, "get", 3}, + } + actual := newAPIRequestCounts("nodeName") + for _, tc := range testCases { + actual.IncrementRequestCount(tc.resource, tc.ts.Hour(), tc.user, tc.verb, tc.count) + } + expected := &apiRequestCounts{ + nodeName: "nodeName", + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{ + gvr("test.v1.group"): { + resource: gvr("test.v1.group"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + bobKey: {user: bobKey, verbsToRequestCounts: map[string]*verbRequestCount{"get": {count: 1}, "list": {count: 2}}}, + }}, + 1: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + bobKey: {user: bobKey, verbsToRequestCounts: map[string]*verbRequestCount{"get": {count: 1}}}, + }, + }, + }}, + gvr("test.v2.group"): { + resource: gvr("test.v2.group"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 2: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + bobKey: {user: bobKey, verbsToRequestCounts: map[string]*verbRequestCount{"get": {count: 1}}}, + sueKey: {user: sueKey, verbsToRequestCounts: map[string]*verbRequestCount{"list": {count: 2}, "get": {count: 4}}}, + }}, + }}, + }, + } + + if !actual.Equals(expected) { + t.Error(diff.StringDiff(expected.String(), actual.String())) + } +} + +func TestAPIRequestCounts_IncrementRequestCounts(t *testing.T) { + testCases := []struct { + name string + existing *apiRequestCounts + additional *apiRequestCounts + expected *apiRequestCounts + }{ + { + name: "BothEmpty", + existing: &apiRequestCounts{}, + additional: &apiRequestCounts{}, + expected: &apiRequestCounts{}, + }, + { + name: "TargetEmpty", + existing: newAPIRequestCounts(""), + additional: &apiRequestCounts{ + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{ + gvr("resource.."): { + resource: gvr("resource"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + genericUserKey: {user: genericUserKey, verbsToRequestCounts: map[string]*verbRequestCount{"verb": {1}}}, + }, + }, + }}, + }, + }, + expected: &apiRequestCounts{ + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{ + gvr("resource.."): { + resource: gvr("resource"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + genericUserKey: {user: genericUserKey, verbsToRequestCounts: map[string]*verbRequestCount{"verb": {1}}}, + }, + }, + }}, + }, + }, + }, + { + name: "SourceEmpty", + existing: &apiRequestCounts{ + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{ + gvr("resource.."): { + resource: gvr("resource"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + genericUserKey: {user: genericUserKey, verbsToRequestCounts: map[string]*verbRequestCount{"verb": {1}}}, + }, + }, + }}, + }, + }, + additional: &apiRequestCounts{resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{}}, + expected: &apiRequestCounts{ + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{ + gvr("resource.."): { + resource: gvr("resource"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + genericUserKey: {user: genericUserKey, verbsToRequestCounts: map[string]*verbRequestCount{"verb": {1}}}, + }, + }, + }}, + }, 
+ }, + }, + { + name: "MergeCount", + existing: &apiRequestCounts{ + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{ + gvr("resource.."): { + resource: gvr("resource"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + genericUserKey: {user: genericUserKey, verbsToRequestCounts: map[string]*verbRequestCount{"verb": {1}}}, + }, + }, + }}, + }, + }, + additional: &apiRequestCounts{ + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{ + gvr("resource.."): { + resource: gvr("resource"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + genericUserKey: {user: genericUserKey, verbsToRequestCounts: map[string]*verbRequestCount{"verb": {2}}}, + }, + }, + }}, + }, + }, + expected: &apiRequestCounts{ + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{ + gvr("resource.."): { + resource: gvr("resource"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + genericUserKey: {user: genericUserKey, verbsToRequestCounts: map[string]*verbRequestCount{"verb": {3}}}, + }, + }, + }}, + }, + }, + }, + { + name: "Merge", + existing: &apiRequestCounts{ + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{ + gvr("resource.v1."): { + resource: gvr("resource.v1"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + bobKey: {user: bobKey, verbsToRequestCounts: map[string]*verbRequestCount{"get": {1}}}, + }, + }, + }}, + }, + }, + additional: &apiRequestCounts{ + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{ + gvr("resource.v1."): { + resource: gvr("resource.v1"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + bobKey: {user: bobKey, verbsToRequestCounts: map[string]*verbRequestCount{"get": {2}, "post": {1}}}, + sueKey: {user: sueKey, verbsToRequestCounts: map[string]*verbRequestCount{"get": {5}}}, + }, + }, + 2: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + bobKey: {user: bobKey, verbsToRequestCounts: map[string]*verbRequestCount{"get": {1}}}, + }, + }, + }}, + gvr("resource.v2."): { + resource: gvr("resource.v2"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + genericUserKey: {user: genericUserKey, verbsToRequestCounts: map[string]*verbRequestCount{"get": {1}}}, + }, + }, + }}, + }, + }, + expected: &apiRequestCounts{ + resourceToRequestCount: map[schema.GroupVersionResource]*resourceRequestCounts{ + gvr("resource.v1."): { + resource: gvr("resource.v1"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + bobKey: {user: bobKey, verbsToRequestCounts: map[string]*verbRequestCount{"get": {3}, "post": {1}}}, + sueKey: {user: sueKey, verbsToRequestCounts: map[string]*verbRequestCount{"get": {5}}}, + }, + }, + 2: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + bobKey: {user: bobKey, verbsToRequestCounts: map[string]*verbRequestCount{"get": {1}}}, + }, + }, + }}, + gvr("resource.v2."): { + resource: gvr("resource.v2"), + hourToRequestCount: map[int]*hourlyRequestCounts{ + 0: { + usersToRequestCounts: map[userKey]*userRequestCounts{ + genericUserKey: {user: genericUserKey, 
verbsToRequestCounts: map[string]*verbRequestCount{"get": {1}}}, + }, + }, + }}, + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.existing.Add(tc.additional) + + if !tc.existing.Equals(tc.expected) { + t.Error(diff.StringDiff(tc.expected.String(), tc.existing.String())) + } + }) + } +} + +func testTime(h, m int) time.Time { + return time.Date(1974, 9, 18, 0+h, 0+m, 0, 0, time.UTC) +} diff --git a/openshift-kube-apiserver/filters/apirequestcount/update_func.go b/openshift-kube-apiserver/filters/apirequestcount/update_func.go new file mode 100644 index 0000000000000..b0565ffc96475 --- /dev/null +++ b/openshift-kube-apiserver/filters/apirequestcount/update_func.go @@ -0,0 +1,213 @@ +package apirequestcount + +import ( + "sort" + "strings" + + apiv1 "github.com/openshift/api/apiserver/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/openshift-kube-apiserver/filters/apirequestcount/v1helpers" +) + +// SetRequestCountsForNode adds additional api request counts to the log. +// countsToPersist must not be mutated. +func SetRequestCountsForNode(nodeName string, currentHour, expiredHour int, countsToPersist *resourceRequestCounts) v1helpers.UpdateStatusFunc { + return func(maxNumUsers int, status *apiv1.APIRequestCountStatus) { + existingLogsFromAPI := apiStatusToRequestCount(countsToPersist.resource, status) + existingNodeLogFromAPI := existingLogsFromAPI.Node(nodeName) + existingNodeLogFromAPI.ExpireOldestCounts(expiredHour) + + // updatedCounts is an alias so we recognize it, but it is based on the newly computed struct so we don't destroy + // our input data. + updatedCounts := existingNodeLogFromAPI.Resource(countsToPersist.resource) + updatedCounts.Add(countsToPersist) + hourlyRequestLogs := resourceRequestCountToHourlyNodeRequestLog(nodeName, maxNumUsers, updatedCounts) + + newStatus := setRequestCountsForNode(status, nodeName, currentHour, expiredHour, hourlyRequestLogs) + status.Last24h = newStatus.Last24h + status.CurrentHour = newStatus.CurrentHour + status.RemovedInRelease = removedRelease(countsToPersist.resource) + status.RequestCount = newStatus.RequestCount + } +} + +func nodeStatusDefaulter(nodeName string, currentHour, expiredHour int, resource schema.GroupVersionResource) v1helpers.UpdateStatusFunc { + return SetRequestCountsForNode(nodeName, currentHour, expiredHour, newResourceRequestCounts(resource)) +} + +func setRequestCountsForNode(status *apiv1.APIRequestCountStatus, nodeName string, currentHour, expiredHour int, hourlyNodeRequests []apiv1.PerNodeAPIRequestLog) *apiv1.APIRequestCountStatus { + newStatus := status.DeepCopy() + newStatus.Last24h = []apiv1.PerResourceAPIRequestLog{} + newStatus.CurrentHour = apiv1.PerResourceAPIRequestLog{} + + for hour, currentNodeCount := range hourlyNodeRequests { + totalRequestThisHour := int64(0) + nextHourStatus := apiv1.PerResourceAPIRequestLog{} + if hour == expiredHour { + newStatus.Last24h = append(newStatus.Last24h, nextHourStatus) + continue + } + if len(status.Last24h) > hour { + for _, oldNodeStatus := range status.Last24h[hour].ByNode { + if oldNodeStatus.NodeName == nodeName { + continue + } + totalRequestThisHour += oldNodeStatus.RequestCount + nextHourStatus.ByNode = append(nextHourStatus.ByNode, *oldNodeStatus.DeepCopy()) + } + } + nextHourStatus.ByNode = append(nextHourStatus.ByNode, currentNodeCount) + totalRequestThisHour += currentNodeCount.RequestCount + nextHourStatus.RequestCount = totalRequestThisHour + + newStatus.Last24h = append(newStatus.Last24h, 
nextHourStatus) + } + + totalRequestsThisDay := int64(0) + for _, hourCount := range newStatus.Last24h { + totalRequestsThisDay += hourCount.RequestCount + } + newStatus.RequestCount = totalRequestsThisDay + + // get all our sorting done before copying + canonicalizeStatus(newStatus) + newStatus.CurrentHour = newStatus.Last24h[currentHour] + + return newStatus +} + +// in this function we have exclusive access to resourceRequestCounts, so do the easy map navigation +func resourceRequestCountToHourlyNodeRequestLog(nodeName string, maxNumUsers int, resourceRequestCounts *resourceRequestCounts) []apiv1.PerNodeAPIRequestLog { + hourlyNodeRequests := []apiv1.PerNodeAPIRequestLog{} + for i := 0; i < 24; i++ { + hourlyNodeRequests = append(hourlyNodeRequests, + apiv1.PerNodeAPIRequestLog{ + NodeName: nodeName, + ByUser: nil, + }, + ) + } + + for hour, hourlyCount := range resourceRequestCounts.hourToRequestCount { + // be sure to suppress the "extra" added back into memory so we don't double count requests + totalRequestsThisHour := int64(0) - hourlyCount.countToSuppress + for userKey, userCount := range hourlyCount.usersToRequestCounts { + apiUserStatus := apiv1.PerUserAPIRequestCount{ + UserName: userKey.user, + UserAgent: userKey.userAgent, + RequestCount: 0, + ByVerb: nil, + } + totalCount := int64(0) + for verb, verbCount := range userCount.verbsToRequestCounts { + totalCount += verbCount.count + apiUserStatus.ByVerb = append(apiUserStatus.ByVerb, + apiv1.PerVerbAPIRequestCount{ + Verb: verb, + RequestCount: verbCount.count, + }) + } + apiUserStatus.RequestCount = totalCount + totalRequestsThisHour += totalCount + + // the api resource has an interesting property of only keeping the last few. Having a short list makes the sort faster + hasMaxEntries := len(hourlyNodeRequests[hour].ByUser) >= maxNumUsers + if hasMaxEntries { + // users are expected to be sorted by descending request count + currentSmallestCountIndex := len(hourlyNodeRequests[hour].ByUser) - 1 + currentSmallestCount := hourlyNodeRequests[hour].ByUser[currentSmallestCountIndex].RequestCount + if apiUserStatus.RequestCount <= currentSmallestCount { + // not in top numberOfUsersToReport + continue + } + // drop smallest user request count to make room + hourlyNodeRequests[hour].ByUser = hourlyNodeRequests[hour].ByUser[:currentSmallestCountIndex] + } + + hourlyNodeRequests[hour].ByUser = append(hourlyNodeRequests[hour].ByUser, apiUserStatus) + sort.Stable(sort.Reverse(byNumberOfUserRequests(hourlyNodeRequests[hour].ByUser))) + } + hourlyNodeRequests[hour].RequestCount = totalRequestsThisHour + } + + return hourlyNodeRequests +} + +func apiStatusToRequestCount(resource schema.GroupVersionResource, status *apiv1.APIRequestCountStatus) *clusterRequestCounts { + requestCount := newClusterRequestCounts() + for hour, hourlyCount := range status.Last24h { + for _, hourlyNodeCount := range hourlyCount.ByNode { + for _, hourNodeUserCount := range hourlyNodeCount.ByUser { + for _, hourlyNodeUserVerbCount := range hourNodeUserCount.ByVerb { + requestCount.IncrementRequestCount( + hourlyNodeCount.NodeName, + resource, + hour, + userKey{ + user: hourNodeUserCount.UserName, + userAgent: hourNodeUserCount.UserAgent, + }, + hourlyNodeUserVerbCount.Verb, + hourlyNodeUserVerbCount.RequestCount, + ) + } + } + } + } + return requestCount +} + +func canonicalizeStatus(status *apiv1.APIRequestCountStatus) { + for hour := range status.Last24h { + hourlyCount := status.Last24h[hour] + for j := range hourlyCount.ByNode { + nodeCount := hourlyCount.ByNode[j] + 
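// hourlyCount and nodeCount are struct copies, but ByNode, ByUser and ByVerb are slice headers sharing backing arrays with status, so the sorts below still reorder status in place. +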
for k := range nodeCount.ByUser { + userCount := nodeCount.ByUser[k] + sort.Stable(byVerb(userCount.ByVerb)) + } + sort.Stable(sort.Reverse(byNumberOfUserRequests(nodeCount.ByUser))) + } + sort.Stable(byNode(status.Last24h[hour].ByNode)) + } + +} + +type byVerb []apiv1.PerVerbAPIRequestCount + +func (s byVerb) Len() int { + return len(s) +} +func (s byVerb) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s byVerb) Less(i, j int) bool { + return strings.Compare(s[i].Verb, s[j].Verb) < 0 +} + +type byNode []apiv1.PerNodeAPIRequestLog + +func (s byNode) Len() int { + return len(s) +} +func (s byNode) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s byNode) Less(i, j int) bool { + return strings.Compare(s[i].NodeName, s[j].NodeName) < 0 +} + +type byNumberOfUserRequests []apiv1.PerUserAPIRequestCount + +func (s byNumberOfUserRequests) Len() int { + return len(s) +} +func (s byNumberOfUserRequests) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s byNumberOfUserRequests) Less(i, j int) bool { + return s[i].RequestCount < s[j].RequestCount +} diff --git a/openshift-kube-apiserver/filters/apirequestcount/v1helpers/helpers.go b/openshift-kube-apiserver/filters/apirequestcount/v1helpers/helpers.go new file mode 100644 index 0000000000000..e5ebd7bc62598 --- /dev/null +++ b/openshift-kube-apiserver/filters/apirequestcount/v1helpers/helpers.go @@ -0,0 +1,71 @@ +package v1helpers + +import ( + "context" + + apiv1 "github.com/openshift/api/apiserver/v1" + apiv1client "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" +) + +type UpdateStatusFunc func(maxNumUsers int, status *apiv1.APIRequestCountStatus) + +func ApplyStatus(ctx context.Context, client apiv1client.APIRequestCountInterface, name string, statusDefaulter UpdateStatusFunc, updateFuncs ...UpdateStatusFunc) (*apiv1.APIRequestCountStatus, bool, error) { + updated := false + var updatedStatus *apiv1.APIRequestCountStatus + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + existingOrDefaultAPIRequestCount, err := client.Get(ctx, name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + // APIRequestCount might have been purposely deleted. We will + // try to create it again further below if there is a need to. + existingOrDefaultAPIRequestCount = &apiv1.APIRequestCount{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: apiv1.APIRequestCountSpec{NumberOfUsersToReport: 10}, + } + // make sure the status doesn't result in a diff on a no-op. + statusDefaulter(10, &existingOrDefaultAPIRequestCount.Status) + } else if err != nil { + return err + } + oldStatus := existingOrDefaultAPIRequestCount.Status + newStatus := oldStatus.DeepCopy() + for _, update := range updateFuncs { + update(int(existingOrDefaultAPIRequestCount.Spec.NumberOfUsersToReport), newStatus) + } + if equality.Semantic.DeepEqual(&oldStatus, newStatus) { + updatedStatus = newStatus + return nil + } + + // At this point the status has been semantically changed by the updateFuncs, + // possibly due to new requests, hourly log expiration, and so on. + + existingAPIRequestCount, err := client.Get(ctx, name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + // APIRequestCount might have been purposely deleted, but new requests + // have come in, so let's re-create the APIRequestCount resource. 
+ newAPIRequestCount := &apiv1.APIRequestCount{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: apiv1.APIRequestCountSpec{ + NumberOfUsersToReport: 10, + }, + } + existingAPIRequestCount, err = client.Create(ctx, newAPIRequestCount, metav1.CreateOptions{}) + } + if err != nil { + return err + } + existingAPIRequestCount.Status = *newStatus + updatedAPIRequestCount, err := client.UpdateStatus(ctx, existingAPIRequestCount, metav1.UpdateOptions{}) + if err != nil { + return err + } + updatedStatus = &updatedAPIRequestCount.Status + updated = true + return err + }) + return updatedStatus, updated, err +} diff --git a/openshift-kube-apiserver/filters/apirequestcount_filter.go b/openshift-kube-apiserver/filters/apirequestcount_filter.go new file mode 100644 index 0000000000000..12d1606d6fa7c --- /dev/null +++ b/openshift-kube-apiserver/filters/apirequestcount_filter.go @@ -0,0 +1,40 @@ +package filters + +import ( + "net/http" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/kubernetes/openshift-kube-apiserver/filters/apirequestcount" +) + +// WithAPIRequestCountLogging adds a handler that logs counts of api requests. +func WithAPIRequestCountLogging(handler http.Handler, requestLogger apirequestcount.APIRequestLogger) http.Handler { + handlerFunc := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + defer handler.ServeHTTP(w, req) + info, ok := request.RequestInfoFrom(req.Context()) + if !ok || !info.IsResourceRequest { + return + } + timestamp, ok := request.ReceivedTimestampFrom(req.Context()) + if !ok { + return + } + user, ok := request.UserFrom(req.Context()) + if !ok { + return + } + requestLogger.LogRequest( + schema.GroupVersionResource{ + Group: info.APIGroup, + Version: info.APIVersion, + Resource: info.Resource, + }, + timestamp, + user.GetName(), + req.UserAgent(), + info.Verb, + ) + }) + return handlerFunc +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/flags.go b/openshift-kube-apiserver/openshiftkubeapiserver/flags.go new file mode 100644 index 0000000000000..a77253141fe29 --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/flags.go @@ -0,0 +1,112 @@ +package openshiftkubeapiserver + +import ( + "fmt" + "io/ioutil" + "net" + "strings" + + configv1 "github.com/openshift/api/config/v1" + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + "github.com/openshift/apiserver-library-go/pkg/configflags" + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + apiserverv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" +) + +func ConfigToFlags(kubeAPIServerConfig *kubecontrolplanev1.KubeAPIServerConfig) ([]string, error) { + args := unmaskArgs(kubeAPIServerConfig.APIServerArguments) + + host, portString, err := net.SplitHostPort(kubeAPIServerConfig.ServingInfo.BindAddress) + if err != nil { + return nil, err + } + + admissionFlags, err := admissionFlags(kubeAPIServerConfig.AdmissionConfig) + if err != nil { + return nil, err + } + for flag, value := range admissionFlags { + configflags.SetIfUnset(args, flag, value...) + } + for flag, value := range configflags.AuditFlags(&kubeAPIServerConfig.AuditConfig, configflags.ArgsWithPrefix(args, "audit-")) { + configflags.SetIfUnset(args, flag, value...) + } + configflags.SetIfUnset(args, "bind-address", host) + configflags.SetIfUnset(args, "cors-allowed-origins", kubeAPIServerConfig.CORSAllowedOrigins...) 
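+ // Values already present in APIServerArguments take precedence; SetIfUnset only fills in a flag derived from the structured config when it has not been supplied explicitly.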
+ configflags.SetIfUnset(args, "secure-port", portString) + configflags.SetIfUnset(args, "service-account-key-file", kubeAPIServerConfig.ServiceAccountPublicKeyFiles...) + configflags.SetIfUnset(args, "service-cluster-ip-range", kubeAPIServerConfig.ServicesSubnet) + configflags.SetIfUnset(args, "tls-cipher-suites", kubeAPIServerConfig.ServingInfo.CipherSuites...) + configflags.SetIfUnset(args, "tls-min-version", kubeAPIServerConfig.ServingInfo.MinTLSVersion) + configflags.SetIfUnset(args, "tls-sni-cert-key", sniCertKeys(kubeAPIServerConfig.ServingInfo.NamedCertificates)...) + + return configflags.ToFlagSlice(args), nil +} + +func admissionFlags(admissionConfig configv1.AdmissionConfig) (map[string][]string, error) { + args := map[string][]string{} + + upstreamAdmissionConfig, err := ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(admissionConfig.PluginConfig) + if err != nil { + return nil, err + } + configBytes, err := helpers.WriteYAML(upstreamAdmissionConfig, apiserverv1alpha1.AddToScheme) + if err != nil { + return nil, err + } + + tempFile, err := ioutil.TempFile("", "kubeapiserver-admission-config.yaml") + if err != nil { + return nil, err + } + if _, err := tempFile.Write(configBytes); err != nil { + return nil, err + } + tempFile.Close() + + configflags.SetIfUnset(args, "admission-control-config-file", tempFile.Name()) + + return args, nil +} + +func sniCertKeys(namedCertificates []configv1.NamedCertificate) []string { + args := []string{} + for _, nc := range namedCertificates { + names := "" + if len(nc.Names) > 0 { + names = ":" + strings.Join(nc.Names, ",") + } + args = append(args, fmt.Sprintf("%s,%s%s", nc.CertFile, nc.KeyFile, names)) + } + return args +} + +func unmaskArgs(args map[string]kubecontrolplanev1.Arguments) map[string][]string { + ret := map[string][]string{} + for key, slice := range args { + for _, val := range slice { + ret[key] = append(ret[key], val) + } + } + return ret +} + +func ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(in map[string]configv1.AdmissionPluginConfig) (*apiserverv1alpha1.AdmissionConfiguration, error) { + ret := &apiserverv1alpha1.AdmissionConfiguration{} + + for _, pluginName := range sets.StringKeySet(in).List() { + kubeConfig := apiserverv1alpha1.AdmissionPluginConfiguration{ + Name: pluginName, + Path: in[pluginName].Location, + Configuration: &runtime.Unknown{ + Raw: in[pluginName].Configuration.Raw, + }, + } + + ret.Plugins = append(ret.Plugins, kubeConfig) + } + + return ret, nil +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/flags_test.go b/openshift-kube-apiserver/openshiftkubeapiserver/flags_test.go new file mode 100644 index 0000000000000..3241b9b432981 --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/flags_test.go @@ -0,0 +1,26 @@ +package openshiftkubeapiserver + +import ( + "testing" + + "github.com/openshift/api/config/v1" +) + +func TestSNICertKeys(t *testing.T) { + testCases := []struct { + names []string + expected string + }{ + {names: []string{"foo"}, expected: "secret.crt,secret.key:foo"}, + {names: []string{"foo", "bar"}, expected: "secret.crt,secret.key:foo,bar"}, + {expected: "secret.crt,secret.key"}, + } + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + result := sniCertKeys([]v1.NamedCertificate{{Names: tc.names, CertInfo: v1.CertInfo{CertFile: "secret.crt", KeyFile: "secret.key"}}}) + if len(result) != 1 || result[0] != tc.expected { + t.Errorf("expected: %v, actual: %v", []string{tc.expected}, result) + } + }) + } +} diff --git 
a/openshift-kube-apiserver/openshiftkubeapiserver/patch.go b/openshift-kube-apiserver/openshiftkubeapiserver/patch.go new file mode 100644 index 0000000000000..ce029240f67a9 --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/patch.go @@ -0,0 +1,217 @@ +package openshiftkubeapiserver + +import ( + "os" + "time" + + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy" + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators" + "github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission" + apiclientv1 "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1" + configclient "github.com/openshift/client-go/config/clientset/versioned" + configv1informer "github.com/openshift/client-go/config/informers/externalversions" + quotaclient "github.com/openshift/client-go/quota/clientset/versioned" + quotainformer "github.com/openshift/client-go/quota/informers/externalversions" + quotav1informer "github.com/openshift/client-go/quota/informers/externalversions/quota/v1" + securityv1client "github.com/openshift/client-go/security/clientset/versioned" + securityv1informer "github.com/openshift/client-go/security/informers/externalversions" + userclient "github.com/openshift/client-go/user/clientset/versioned" + userinformer "github.com/openshift/client-go/user/informers/externalversions" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + "github.com/openshift/library-go/pkg/apiserver/apiserverconfig" + "github.com/openshift/library-go/pkg/quota/clusterquotamapping" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/quota/v1/generic" + genericapiserver "k8s.io/apiserver/pkg/server" + clientgoinformers "k8s.io/client-go/informers" + corev1informers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers/usercache" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managednode" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/nodeenv" + "k8s.io/kubernetes/openshift-kube-apiserver/enablement" + "k8s.io/kubernetes/openshift-kube-apiserver/filters/apirequestcount" + "k8s.io/kubernetes/pkg/quota/v1/install" + + // magnet to get authorizer package in hack/update-vendor.sh + _ "github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer" +) + +func OpenShiftKubeAPIServerConfigPatch(genericConfig *genericapiserver.Config, kubeInformers clientgoinformers.SharedInformerFactory, pluginInitializers *[]admission.PluginInitializer) error { + if !enablement.IsOpenShift() { + return nil + } + + openshiftInformers, err := newInformers(genericConfig.LoopbackClientConfig) + if err != nil { + return err + } + + // AUTHORIZER + genericConfig.RequestInfoResolver = apiserverconfig.OpenshiftRequestInfoResolver() + // END AUTHORIZER + + // Inject OpenShift API long running endpoints (like for binary builds). + // TODO: We should disable the timeout code for aggregated endpoints as this can cause problems when upstream adds additional endpoints.
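+ // (Long-running requests bypass the server's request timeout filter, which would otherwise cut off streams such as binary build uploads.)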
+ genericConfig.LongRunningFunc = apiserverconfig.IsLongRunningRequest + + // ADMISSION + clusterQuotaMappingController := newClusterQuotaMappingController(kubeInformers.Core().V1().Namespaces(), openshiftInformers.OpenshiftQuotaInformers.Quota().V1().ClusterResourceQuotas()) + genericConfig.AddPostStartHookOrDie("quota.openshift.io-clusterquotamapping", func(context genericapiserver.PostStartHookContext) error { + go clusterQuotaMappingController.Run(5, context.Done()) + return nil + }) + + *pluginInitializers = append(*pluginInitializers, + imagepolicy.NewInitializer(imagereferencemutators.KubeImageMutators{}, enablement.OpenshiftConfig().ImagePolicyConfig.InternalRegistryHostname), + restrictusers.NewInitializer(openshiftInformers.getOpenshiftUserInformers()), + sccadmission.NewInitializer(openshiftInformers.getOpenshiftSecurityInformers().Security().V1().SecurityContextConstraints()), + clusterresourcequota.NewInitializer( + openshiftInformers.getOpenshiftQuotaInformers().Quota().V1().ClusterResourceQuotas(), + clusterQuotaMappingController.GetClusterQuotaMapper(), + generic.NewRegistry(install.NewQuotaConfigurationForAdmission().Evaluators()), + ), + nodeenv.NewInitializer(enablement.OpenshiftConfig().ProjectConfig.DefaultNodeSelector), + admissionrestconfig.NewInitializer(*rest.CopyConfig(genericConfig.LoopbackClientConfig)), + managementcpusoverride.NewInitializer(openshiftInformers.getOpenshiftInfraInformers().Config().V1().Infrastructures()), + managednode.NewInitializer(openshiftInformers.getOpenshiftInfraInformers().Config().V1().Infrastructures()), + ) + + // This is needed in order to have the correct initializers for the SCC admission plugin which is used to mutate + // PodSpecs for PodSpec-y workload objects in the pod security admission plugin. 
+	enablement.SCCAdmissionPlugin.SetAuthorizer(genericConfig.Authorization.Authorizer)
+	enablement.SCCAdmissionPlugin.SetSecurityInformers(openshiftInformers.getOpenshiftSecurityInformers().Security().V1().SecurityContextConstraints())
+	enablement.SCCAdmissionPlugin.SetExternalKubeInformerFactory(kubeInformers)
+	// END ADMISSION
+
+	// HANDLER CHAIN (with oauth server and web console)
+	apiserverClient, err := apiclientv1.NewForConfig(makeJSONRESTConfig(genericConfig.LoopbackClientConfig))
+	if err != nil {
+		return err
+	}
+	apiRequestCountController := apirequestcount.NewController(apiserverClient.APIRequestCounts(), nodeFor())
+	genericConfig.AddPostStartHook("openshift.io-api-request-count-filter", func(context genericapiserver.PostStartHookContext) error {
+		go apiRequestCountController.Start(context.Done())
+		return nil
+	})
+	genericConfig.BuildHandlerChainFunc, err = BuildHandlerChain(
+		enablement.OpenshiftConfig().AuthConfig.OAuthMetadataFile,
+		kubeInformers.Core().V1().ConfigMaps(),
+		apiRequestCountController,
+	)
+	if err != nil {
+		return err
+	}
+	// END HANDLER CHAIN
+
+	openshiftAPIServiceReachabilityCheck := newOpenshiftAPIServiceReachabilityCheck(genericConfig.PublicAddress)
+	oauthAPIServiceReachabilityCheck := newOAuthAPIServiceReachabilityCheck(genericConfig.PublicAddress)
+	genericConfig.ReadyzChecks = append(genericConfig.ReadyzChecks, openshiftAPIServiceReachabilityCheck, oauthAPIServiceReachabilityCheck)
+
+	genericConfig.AddPostStartHookOrDie("openshift.io-startkubeinformers", func(context genericapiserver.PostStartHookContext) error {
+		go openshiftInformers.Start(context.Done())
+		return nil
+	})
+	genericConfig.AddPostStartHookOrDie("openshift.io-openshift-apiserver-reachable", func(context genericapiserver.PostStartHookContext) error {
+		go openshiftAPIServiceReachabilityCheck.checkForConnection(context)
+		return nil
+	})
+	genericConfig.AddPostStartHookOrDie("openshift.io-oauth-apiserver-reachable", func(context genericapiserver.PostStartHookContext) error {
+		go oauthAPIServiceReachabilityCheck.checkForConnection(context)
+		return nil
+	})
+	enablement.AppendPostStartHooksOrDie(genericConfig)
+
+	return nil
+}
+
+func makeJSONRESTConfig(config *rest.Config) *rest.Config {
+	c := rest.CopyConfig(config)
+	c.AcceptContentTypes = "application/json"
+	c.ContentType = "application/json"
+	return c
+}
+
+func nodeFor() string {
+	node := os.Getenv("HOST_IP")
+	// prefer the hostname when it can be determined; fall back to HOST_IP otherwise
+	if hostname, err := os.Hostname(); err == nil {
+		node = hostname
+	}
+	return node
+}
+
+// newInformers is only exposed for the build's integration testing until it can be fixed more appropriately.
+func newInformers(loopbackClientConfig *rest.Config) (*kubeAPIServerInformers, error) {
+	// ClusterResourceQuota is served using CRD resource; any status update must use JSON
+	jsonLoopbackClientConfig := makeJSONRESTConfig(loopbackClientConfig)
+
+	quotaClient, err := quotaclient.NewForConfig(jsonLoopbackClientConfig)
+	if err != nil {
+		return nil, err
+	}
+	securityClient, err := securityv1client.NewForConfig(jsonLoopbackClientConfig)
+	if err != nil {
+		return nil, err
+	}
+	userClient, err := userclient.NewForConfig(loopbackClientConfig)
+	if err != nil {
+		return nil, err
+	}
+	configClient, err := configclient.NewForConfig(loopbackClientConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO find a single place to create and start informers.  During the 1.7 rebase this will come more naturally in a config object,
+	// before then we should try to eliminate our direct to storage access.
It's making us do weird things. + const defaultInformerResyncPeriod = 10 * time.Minute + + ret := &kubeAPIServerInformers{ + OpenshiftQuotaInformers: quotainformer.NewSharedInformerFactory(quotaClient, defaultInformerResyncPeriod), + OpenshiftSecurityInformers: securityv1informer.NewSharedInformerFactory(securityClient, defaultInformerResyncPeriod), + OpenshiftUserInformers: userinformer.NewSharedInformerFactory(userClient, defaultInformerResyncPeriod), + OpenshiftConfigInformers: configv1informer.NewSharedInformerFactory(configClient, defaultInformerResyncPeriod), + } + if err := ret.OpenshiftUserInformers.User().V1().Groups().Informer().AddIndexers(cache.Indexers{ + usercache.ByUserIndexName: usercache.ByUserIndexKeys, + }); err != nil { + return nil, err + } + + return ret, nil +} + +type kubeAPIServerInformers struct { + OpenshiftQuotaInformers quotainformer.SharedInformerFactory + OpenshiftSecurityInformers securityv1informer.SharedInformerFactory + OpenshiftUserInformers userinformer.SharedInformerFactory + OpenshiftConfigInformers configv1informer.SharedInformerFactory +} + +func (i *kubeAPIServerInformers) getOpenshiftQuotaInformers() quotainformer.SharedInformerFactory { + return i.OpenshiftQuotaInformers +} +func (i *kubeAPIServerInformers) getOpenshiftSecurityInformers() securityv1informer.SharedInformerFactory { + return i.OpenshiftSecurityInformers +} +func (i *kubeAPIServerInformers) getOpenshiftUserInformers() userinformer.SharedInformerFactory { + return i.OpenshiftUserInformers +} +func (i *kubeAPIServerInformers) getOpenshiftInfraInformers() configv1informer.SharedInformerFactory { + return i.OpenshiftConfigInformers +} + +func (i *kubeAPIServerInformers) Start(stopCh <-chan struct{}) { + i.OpenshiftQuotaInformers.Start(stopCh) + i.OpenshiftSecurityInformers.Start(stopCh) + i.OpenshiftUserInformers.Start(stopCh) + i.OpenshiftConfigInformers.Start(stopCh) +} + +func newClusterQuotaMappingController(nsInternalInformer corev1informers.NamespaceInformer, clusterQuotaInformer quotav1informer.ClusterResourceQuotaInformer) *clusterquotamapping.ClusterQuotaMappingController { + return clusterquotamapping.NewClusterQuotaMappingController(nsInternalInformer, clusterQuotaInformer) +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go b/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go new file mode 100644 index 0000000000000..4a7150a86992d --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go @@ -0,0 +1,117 @@ +package openshiftkubeapiserver + +import ( + "net/http" + "strings" + + authenticationv1 "k8s.io/api/authentication/v1" + genericapiserver "k8s.io/apiserver/pkg/server" + coreinformers "k8s.io/client-go/informers/core/v1" + patchfilters "k8s.io/kubernetes/openshift-kube-apiserver/filters" + "k8s.io/kubernetes/openshift-kube-apiserver/filters/apirequestcount" + + authorizationv1 "github.com/openshift/api/authorization/v1" +) + +const ( + openShiftConfigManagedNamespaceName = "openshift-config-managed" + consolePublicConfigMapName = "console-public" +) + +// TODO switch back to taking a kubeapiserver config. 
For now make it obviously safe for 3.11
+func BuildHandlerChain(oauthMetadataFile string, cmInformer coreinformers.ConfigMapInformer, requestLogger apirequestcount.APIRequestLogger) (func(apiHandler http.Handler, kc *genericapiserver.Config) http.Handler, error) {
+	// load the oauth metadata at construction time, while we can still return an error
+	oAuthMetadata := []byte{}
+	if len(oauthMetadataFile) > 0 {
+		var err error
+		oAuthMetadata, err = loadOAuthMetadataFile(oauthMetadataFile)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return func(apiHandler http.Handler, genericConfig *genericapiserver.Config) http.Handler {
+			// well-known comes after the normal handling chain. This shows where to connect for oauth information
+			handler := withOAuthInfo(apiHandler, oAuthMetadata)
+
+			// after normal chain, so that user is in context
+			handler = patchfilters.WithAPIRequestCountLogging(handler, requestLogger)
+
+			// this is the normal kube handler chain
+			handler = genericapiserver.DefaultBuildHandlerChain(handler, genericConfig)
+
+			// these handlers are all before the normal kube chain
+			handler = translateLegacyScopeImpersonation(handler)
+
+			// redirects from / and /console to consolePublicURL if you're using a browser
+			handler = withConsoleRedirect(handler, cmInformer)
+
+			return handler
+		},
+		nil
+}
+
+// withOAuthInfo serves the OAuth 2.0 Authorization Server Metadata document at
+// the well-known discovery endpoint when OAuth metadata has been configured.
+func withOAuthInfo(handler http.Handler, oAuthMetadata []byte) http.Handler {
+	if len(oAuthMetadata) == 0 {
+		return handler
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		if req.URL.Path != oauthMetadataEndpoint {
+			// Dispatch to the next handler
+			handler.ServeHTTP(w, req)
+			return
+		}
+
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		w.Write(oAuthMetadata)
+	})
+}
+
+// withConsoleRedirect redirects requests under /console to the public console
+// URL published in the console-public configmap, once that configmap is available.
+func withConsoleRedirect(handler http.Handler, cmInformer coreinformers.ConfigMapInformer) http.Handler {
+	cmLister := cmInformer.Lister()
+	informer := cmInformer.Informer()
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		if !strings.HasPrefix(req.URL.Path, "/console") {
+			// Dispatch to the next handler
+			handler.ServeHTTP(w, req)
+			return
+		}
+
+		consoleUrl := ""
+		if informer.HasSynced() {
+			consolePublicConfig, err := cmLister.ConfigMaps(openShiftConfigManagedNamespaceName).Get(consolePublicConfigMapName)
+			if err == nil {
+				consoleUrl = consolePublicConfig.Data["consoleURL"]
+			}
+		}
+		if len(consoleUrl) > 0 {
+			http.Redirect(w, req, consoleUrl, http.StatusFound)
+			return
+		}
+		http.Error(w, "redirection failed: console URL not found", http.StatusInternalServerError)
+	})
+}
+
+// legacyImpersonateUserScopeHeader is the header name older servers were using
+// just for scopes, so we need to translate it from clients that may still be
+// using it.
+const legacyImpersonateUserScopeHeader = "Impersonate-User-Scope"
+
+// translateLegacyScopeImpersonation is a filter that translates user scope
+// impersonation for openshift into the equivalent kube headers.
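+// For example (illustrative, not part of the original change), an incoming
+// request header
+//   Impersonate-User-Scope: user:info
+// is copied to
+//   Impersonate-Extra-scopes.authorization.openshift.io: user:info
+// which is the form the upstream impersonation filter understands.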
+func translateLegacyScopeImpersonation(handler http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		for _, scope := range req.Header[legacyImpersonateUserScopeHeader] {
+			req.Header[authenticationv1.ImpersonateUserExtraHeaderPrefix+authorizationv1.ScopesKey] =
+				append(req.Header[authenticationv1.ImpersonateUserExtraHeaderPrefix+authorizationv1.ScopesKey], scope)
+		}
+
+		handler.ServeHTTP(w, req)
+	})
+}
diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/sdn_readyz_wait.go b/openshift-kube-apiserver/openshiftkubeapiserver/sdn_readyz_wait.go
new file mode 100644
index 0000000000000..98f22c86fb469
--- /dev/null
+++ b/openshift-kube-apiserver/openshiftkubeapiserver/sdn_readyz_wait.go
@@ -0,0 +1,177 @@
+package openshiftkubeapiserver
+
+import (
+	gocontext "context"
+	"crypto/tls"
+	"fmt"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"time"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	genericapiserver "k8s.io/apiserver/pkg/server"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog/v2"
+)
+
+func newOpenshiftAPIServiceReachabilityCheck(ipForKubernetesDefaultService net.IP) *aggregatedAPIServiceAvailabilityCheck {
+	return newAggregatedAPIServiceReachabilityCheck(ipForKubernetesDefaultService, "openshift-apiserver", "api")
+}
+
+func newOAuthAPIServiceReachabilityCheck(ipForKubernetesDefaultService net.IP) *aggregatedAPIServiceAvailabilityCheck {
+	return newAggregatedAPIServiceReachabilityCheck(ipForKubernetesDefaultService, "openshift-oauth-apiserver", "api")
+}
+
+// if the API service is not found, then this check returns quickly.
+// if the endpoint is not accessible within 60 seconds, we report ready no matter what;
+// otherwise, wait for up to 60 seconds to be able to reach the apiserver
+func newAggregatedAPIServiceReachabilityCheck(ipForKubernetesDefaultService net.IP, namespace, service string) *aggregatedAPIServiceAvailabilityCheck {
+	return &aggregatedAPIServiceAvailabilityCheck{
+		done:                          make(chan struct{}),
+		ipForKubernetesDefaultService: ipForKubernetesDefaultService,
+		namespace:                     namespace,
+		serviceName:                   service,
+	}
+}
+
+type aggregatedAPIServiceAvailabilityCheck struct {
+	// done indicates that this check is complete (success or failure) and the check should return true
+	done chan struct{}
+
+	// ipForKubernetesDefaultService is used to determine whether this endpoint is the only one for the kubernetes.default.svc
+	// if so, it will report reachable immediately because honoring some requests is better than honoring no requests.
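+	// (Editor's illustrative note, not part of the original change: callers
+	// pass genericConfig.PublicAddress here, i.e. this kube-apiserver's own
+	// advertised IP, which is compared against the addresses listed in the
+	// kubernetes.default endpoints object below.)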
+	ipForKubernetesDefaultService net.IP
+
+	// namespace is the namespace hosting the service for the aggregated api
+	namespace string
+	// serviceName is used to get a list of endpoints to directly dial
+	serviceName string
+}
+
+func (c *aggregatedAPIServiceAvailabilityCheck) Name() string {
+	return fmt.Sprintf("%s-%s-available", c.serviceName, c.namespace)
+}
+
+func (c *aggregatedAPIServiceAvailabilityCheck) Check(req *http.Request) error {
+	select {
+	case <-c.done:
+		return nil
+	default:
+		return fmt.Errorf("check is not yet complete")
+	}
+}
+
+func (c *aggregatedAPIServiceAvailabilityCheck) checkForConnection(context genericapiserver.PostStartHookContext) {
+	defer utilruntime.HandleCrash()
+
+	reachedAggregatedAPIServer := make(chan struct{})
+	noAggregatedAPIServer := make(chan struct{})
+	waitUntilCh := make(chan struct{})
+	defer func() {
+		close(waitUntilCh) // this stops the endpoint check
+		close(c.done)      // once this method is done, the ready check should return true
+	}()
+	start := time.Now()
+
+	kubeClient, err := kubernetes.NewForConfig(context.LoopbackClientConfig)
+	if err != nil {
+		// shouldn't happen. this means the loopback config didn't work.
+		panic(err)
+	}
+
+	ctx, cancel := gocontext.WithTimeout(gocontext.TODO(), 30*time.Second)
+	defer cancel()
+
+	// if the kubernetes.default.svc needs an endpoint and this is the only apiserver that can fulfill it, then we don't
+	// wait for reachability. We wait for other conditions, but unreachable apiservers correctly 503 for clients.
+	kubeEndpoints, err := kubeClient.CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{})
+	switch {
+	case apierrors.IsNotFound(err):
+		utilruntime.HandleError(fmt.Errorf("%s did not find a kubernetes.default.svc endpoint", c.Name()))
+		return
+	case err != nil:
+		utilruntime.HandleError(fmt.Errorf("%s unable to read a kubernetes.default.svc endpoint: %w", c.Name(), err))
+		return
+	case len(kubeEndpoints.Subsets) == 0:
+		utilruntime.HandleError(fmt.Errorf("%s did not find any IPs for kubernetes.default.svc endpoint", c.Name()))
+		return
+	case len(kubeEndpoints.Subsets[0].Addresses) == 0:
+		utilruntime.HandleError(fmt.Errorf("%s did not find any IPs for kubernetes.default.svc endpoint", c.Name()))
+		return
+	case len(kubeEndpoints.Subsets[0].Addresses) == 1:
+		if kubeEndpoints.Subsets[0].Addresses[0].IP == c.ipForKubernetesDefaultService.String() {
+			utilruntime.HandleError(fmt.Errorf("%s only found this kube-apiserver's IP (%v) in kubernetes.default.svc endpoint", c.Name(), c.ipForKubernetesDefaultService))
+			return
+		}
+	}
+
+	// Start a thread which repeatedly tries to connect to any aggregated apiserver endpoint.
+	//  1. if the aggregated apiserver endpoint doesn't exist, logs a warning and reports ready
+	//  2. if a connection cannot be made, after 60 seconds logs an error and reports ready -- this avoids a rebootstrapping cycle
+	//  3. as soon as a connection can be made, logs a time to be ready and reports ready.
+	go func() {
+		defer utilruntime.HandleCrash()
+
+		client := http.Client{
+			Transport: &http.Transport{
+				// since any http return code satisfies us, we don't bother to send credentials.
+				// we don't care about someone faking a response and we aren't sending credentials, so we don't check the server CA
+				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+			},
+			Timeout: 1 * time.Second, // these should all be very fast. if none work, we continue anyway.
+		}
+
+		wait.PollImmediateUntil(1*time.Second, func() (bool, error) {
+			ctx := gocontext.TODO()
+			openshiftEndpoints, err := kubeClient.CoreV1().Endpoints(c.namespace).Get(ctx, c.serviceName, metav1.GetOptions{})
+			if apierrors.IsNotFound(err) {
+				// if we have no aggregated apiserver endpoint, we have no reason to wait
+				klog.Warningf("%s.%s.svc endpoints were not found", c.serviceName, c.namespace)
+				close(noAggregatedAPIServer)
+				return true, nil
+			}
+			if err != nil {
+				utilruntime.HandleError(err)
+				return false, nil
+			}
+			for _, subset := range openshiftEndpoints.Subsets {
+				for _, address := range subset.Addresses {
+					url := fmt.Sprintf("https://%v", net.JoinHostPort(address.IP, "8443"))
+					resp, err := client.Get(url)
+					if err == nil { // any http response is fine. it means that we made contact
+						response, dumpErr := httputil.DumpResponse(resp, true)
+						klog.V(4).Infof("succeeded in connecting to %q (response dump error: %v)\n%v", url, dumpErr, string(response))
+						close(reachedAggregatedAPIServer)
+						resp.Body.Close()
+						return true, nil
+					}
+					klog.V(2).Infof("failed to connect to %q: %v", url, err)
+				}
+			}
+
+			return false, nil
+		}, waitUntilCh)
+	}()
+
+	select {
+	case <-time.After(60 * time.Second):
+		// if we timeout, always return ok so that we can start from a case where all kube-apiservers are down and the SDN isn't coming up
+		utilruntime.HandleError(fmt.Errorf("%s never reached apiserver", c.Name()))
+		return
+	case <-context.Done():
+		utilruntime.HandleError(fmt.Errorf("%s interrupted", c.Name()))
+		return
+	case <-noAggregatedAPIServer:
+		utilruntime.HandleError(fmt.Errorf("%s did not find an %s endpoint", c.Name(), c.namespace))
+		return
+
+	case <-reachedAggregatedAPIServer:
+		end := time.Now()
+		klog.Infof("reached %s via SDN after %v milliseconds", c.namespace, end.Sub(start).Milliseconds())
+		return
+	}
+}
diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/wellknown_oauth.go b/openshift-kube-apiserver/openshiftkubeapiserver/wellknown_oauth.go
new file mode 100644
index 0000000000000..8b34da7aa3203
--- /dev/null
+++ b/openshift-kube-apiserver/openshiftkubeapiserver/wellknown_oauth.go
@@ -0,0 +1,57 @@
+package openshiftkubeapiserver
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/url"
+
+	"github.com/openshift/library-go/pkg/oauth/oauthdiscovery"
+)
+
+const (
+	// Discovery endpoint for OAuth 2.0 Authorization Server Metadata
+	// See IETF Draft:
+	// https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+	oauthMetadataEndpoint = "/.well-known/oauth-authorization-server"
+)
+
+func validateURL(urlString string) error {
+	urlObj, err := url.Parse(urlString)
+	if err != nil {
+		return fmt.Errorf("%q is an invalid URL: %v", urlString, err)
+	}
+	if len(urlObj.Scheme) == 0 {
+		return fmt.Errorf("must contain a valid scheme")
+	}
+	if len(urlObj.Host) == 0 {
+		return fmt.Errorf("must contain a valid host")
+	}
+	return nil
+}
+
+func loadOAuthMetadataFile(metadataFile string) ([]byte, error) {
+	data, err := ioutil.ReadFile(metadataFile)
+	if err != nil {
+		return nil, fmt.Errorf("unable to read External OAuth Metadata file: %v", err)
+	}
+
+	oauthMetadata := &oauthdiscovery.OauthAuthorizationServerMetadata{}
+	if err := json.Unmarshal(data, oauthMetadata); err != nil {
+		return nil, fmt.Errorf("unable to decode External OAuth Metadata file: %v", err)
+	}
+
+	if err := validateURL(oauthMetadata.Issuer); err != nil {
+		return nil, fmt.Errorf("error validating External OAuth Metadata Issuer field: %v", err)
+	}
+
+	if err := validateURL(oauthMetadata.AuthorizationEndpoint); err != nil {
+		return nil, fmt.Errorf("error validating External OAuth Metadata AuthorizationEndpoint field: %v", err)
+	}
+	if err := validateURL(oauthMetadata.TokenEndpoint); err != nil {
+		return nil, fmt.Errorf("error validating External OAuth Metadata TokenEndpoint field: %v", err)
+	}
+
+	return data, nil
+}
diff --git a/openshift-kube-controller-manager/servicecacertpublisher/metrics.go b/openshift-kube-controller-manager/servicecacertpublisher/metrics.go
new file mode 100644
index 0000000000000..e6867784043a8
--- /dev/null
+++ b/openshift-kube-controller-manager/servicecacertpublisher/metrics.go
@@ -0,0 +1,56 @@
+package servicecacertpublisher
+
+import (
+	"strconv"
+	"sync"
+	"time"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+)
+
+// ServiceCACertPublisher - subsystem name used by service_ca_cert_publisher
+const ServiceCACertPublisher = "service_ca_cert_publisher"
+
+var (
+	syncCounter = metrics.NewCounterVec(
+		&metrics.CounterOpts{
+			Subsystem:      ServiceCACertPublisher,
+			Name:           "sync_total",
+			Help:           "Number of namespace syncs happened in service ca cert publisher.",
+			StabilityLevel: metrics.ALPHA,
+		},
+		[]string{"code"},
+	)
+	syncLatency = metrics.NewHistogramVec(
+		&metrics.HistogramOpts{
+			Subsystem:      ServiceCACertPublisher,
+			Name:           "sync_duration_seconds",
+			Help:           "Latency of namespace syncs in service ca cert publisher, in seconds.",
+			Buckets:        metrics.ExponentialBuckets(0.001, 2, 15),
+			StabilityLevel: metrics.ALPHA,
+		},
+		[]string{"code"},
+	)
+)
+
+func recordMetrics(start time.Time, ns string, err error) {
+	code := "500"
+	if err == nil {
+		code = "200"
+	} else if se, ok := err.(*apierrors.StatusError); ok && se.Status().Code != 0 {
+		code = strconv.Itoa(int(se.Status().Code))
+	}
+	syncLatency.WithLabelValues(code).Observe(time.Since(start).Seconds())
+	syncCounter.WithLabelValues(code).Inc()
+}
+
+var once sync.Once
+
+func registerMetrics() {
+	once.Do(func() {
+		legacyregistry.MustRegister(syncCounter)
+		legacyregistry.MustRegister(syncLatency)
+	})
+}
diff --git a/openshift-kube-controller-manager/servicecacertpublisher/metrics_test.go b/openshift-kube-controller-manager/servicecacertpublisher/metrics_test.go
new file mode 100644
index 0000000000000..75f7297e3ff59
--- /dev/null
+++ b/openshift-kube-controller-manager/servicecacertpublisher/metrics_test.go
@@ -0,0 +1,81 @@
+package servicecacertpublisher
+
+import (
+	"errors"
+	"strings"
+	"testing"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/component-base/metrics/legacyregistry"
+	"k8s.io/component-base/metrics/testutil"
+)
+
+func TestSyncCounter(t *testing.T) {
+	testCases := []struct {
+		desc    string
+		err     error
+		metrics []string
+		want    string
+	}{
+		{
+			desc: "nil error",
+			err:  nil,
+			metrics: []string{
+				"service_ca_cert_publisher_sync_total",
+			},
+			want: `
+# HELP service_ca_cert_publisher_sync_total [ALPHA] Number of namespace syncs happened in service ca cert publisher.
+# TYPE service_ca_cert_publisher_sync_total counter
+service_ca_cert_publisher_sync_total{code="200"} 1
+				`,
+		},
+		{
+			desc: "kube api error",
+			err:  apierrors.NewNotFound(corev1.Resource("configmap"), "test-configmap"),
+			metrics: []string{
+				"service_ca_cert_publisher_sync_total",
+			},
+			want: `
+# HELP service_ca_cert_publisher_sync_total [ALPHA] Number of namespace syncs happened in service ca cert publisher.
+# TYPE service_ca_cert_publisher_sync_total counter
+service_ca_cert_publisher_sync_total{code="404"} 1
+				`,
+		},
+		{
+			desc: "kube api error without code",
+			err:  &apierrors.StatusError{},
+			metrics: []string{
+				"service_ca_cert_publisher_sync_total",
+			},
+			want: `
+# HELP service_ca_cert_publisher_sync_total [ALPHA] Number of namespace syncs happened in service ca cert publisher.
+# TYPE service_ca_cert_publisher_sync_total counter
+service_ca_cert_publisher_sync_total{code="500"} 1
+				`,
+		},
+		{
+			desc: "general error",
+			err:  errors.New("test"),
+			metrics: []string{
+				"service_ca_cert_publisher_sync_total",
+			},
+			want: `
+# HELP service_ca_cert_publisher_sync_total [ALPHA] Number of namespace syncs happened in service ca cert publisher.
+# TYPE service_ca_cert_publisher_sync_total counter
+service_ca_cert_publisher_sync_total{code="500"} 1
+				`,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			recordMetrics(time.Now(), "test-ns", tc.err)
+			defer syncCounter.Reset()
+			if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(tc.want), tc.metrics...); err != nil {
+				t.Fatal(err)
+			}
+		})
+	}
+}
diff --git a/openshift-kube-controller-manager/servicecacertpublisher/publisher.go b/openshift-kube-controller-manager/servicecacertpublisher/publisher.go
new file mode 100644
index 0000000000000..af17ee9802650
--- /dev/null
+++ b/openshift-kube-controller-manager/servicecacertpublisher/publisher.go
@@ -0,0 +1,253 @@
+package servicecacertpublisher
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	coreinformers "k8s.io/client-go/informers/core/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	corelisters "k8s.io/client-go/listers/core/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
+)
+
+// ServiceCACertConfigMapName is the name of the configmap which stores certificates
+// to validate service serving certificates issued by the service ca operator.
+const ServiceCACertConfigMapName = "openshift-service-ca.crt"
+
+func init() {
+	registerMetrics()
+}
+
+// NewPublisher constructs a new controller that manages the configmap which
+// stores certificates in each namespace. It makes sure the certificate
+// configmap exists in each namespace.
+func NewPublisher(cmInformer coreinformers.ConfigMapInformer, nsInformer coreinformers.NamespaceInformer, cl clientset.Interface) (*Publisher, error) {
+	e := &Publisher{
+		client: cl,
+		queue:  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "service_ca_cert_publisher"),
+	}
+
+	cmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		DeleteFunc: e.configMapDeleted,
+		UpdateFunc: e.configMapUpdated,
+	})
+	e.cmLister = cmInformer.Lister()
+	e.cmListerSynced = cmInformer.Informer().HasSynced
+
+	nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc:    e.namespaceAdded,
+		UpdateFunc: e.namespaceUpdated,
+	})
+	e.nsListerSynced = nsInformer.Informer().HasSynced
+
+	e.syncHandler = e.syncNamespace
+
+	return e, nil
+}
+
+// Publisher manages certificate ConfigMap objects inside Namespaces
+type Publisher struct {
+	client clientset.Interface
+
+	// To allow injection for testing.
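+	// (Editor's illustrative note, not part of the original change: the key
+	// handed to syncHandler is a namespace name; see namespaceAdded and
+	// configMapUpdated below, which enqueue namespace.Name / cm.Namespace.)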
+	syncHandler func(key string) error
+
+	cmLister       corelisters.ConfigMapLister
+	cmListerSynced cache.InformerSynced
+
+	nsListerSynced cache.InformerSynced
+
+	queue workqueue.RateLimitingInterface
+}
+
+// Run starts the publisher's workers and blocks until stopCh is closed.
+func (c *Publisher) Run(workers int, stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.Infof("Starting service CA certificate configmap publisher")
+	defer klog.Infof("Shutting down service CA certificate configmap publisher")
+
+	if !cache.WaitForNamedCacheSync("crt configmap", stopCh, c.cmListerSynced) {
+		return
+	}
+
+	for i := 0; i < workers; i++ {
+		go wait.Until(c.runWorker, time.Second, stopCh)
+	}
+
+	<-stopCh
}
+
+func (c *Publisher) configMapDeleted(obj interface{}) {
+	cm, err := convertToCM(obj)
+	if err != nil {
+		utilruntime.HandleError(err)
+		return
+	}
+	if cm.Name != ServiceCACertConfigMapName {
+		return
+	}
+	c.queue.Add(cm.Namespace)
+}
+
+func (c *Publisher) configMapUpdated(_, newObj interface{}) {
+	cm, err := convertToCM(newObj)
+	if err != nil {
+		utilruntime.HandleError(err)
+		return
+	}
+	if cm.Name != ServiceCACertConfigMapName {
+		return
+	}
+	c.queue.Add(cm.Namespace)
+}
+
+func (c *Publisher) namespaceAdded(obj interface{}) {
+	namespace := obj.(*v1.Namespace)
+	c.queue.Add(namespace.Name)
+}
+
+func (c *Publisher) namespaceUpdated(oldObj interface{}, newObj interface{}) {
+	newNamespace := newObj.(*v1.Namespace)
+	if newNamespace.Status.Phase != v1.NamespaceActive {
+		return
+	}
+	c.queue.Add(newNamespace.Name)
+}
+
+func (c *Publisher) runWorker() {
+	for c.processNextWorkItem() {
+	}
+}
+
+// processNextWorkItem deals with one key off the queue. It returns false when
+// it's time to quit.
+func (c *Publisher) processNextWorkItem() bool {
+	key, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(key)
+
+	if err := c.syncHandler(key.(string)); err != nil {
+		utilruntime.HandleError(fmt.Errorf("syncing %q failed: %v", key, err))
+		c.queue.AddRateLimited(key)
+		return true
+	}
+
+	c.queue.Forget(key)
+	return true
+}
+
+var (
+	// default to the secure annotation.
+	// This annotation prompts the service ca operator to inject
+	// the service ca bundle into the configmap.
+	injectionAnnotation = map[string]string{
+		"service.beta.openshift.io/inject-cabundle": "true",
+	}
+	setAnnotationOnce = sync.Once{}
+)
+
+func getInjectionAnnotation() map[string]string {
+	setAnnotationOnce.Do(func() {
+		// this envvar can be used to get the kube-controller-manager to inject a vulnerable legacy service ca.
+		// the kube-controller-manager carries no existing launch patches, so we aren't going to add new
+		// perma-flags.
+		// it would be nicer to find a way to pass this more obviously. This is a deep side-effect.
+		// though ideally, we see this age out over time.
+		useVulnerable := os.Getenv("OPENSHIFT_USE_VULNERABLE_LEGACY_SERVICE_CA_CRT")
+		if len(useVulnerable) == 0 {
+			return
+		}
+		useVulnerableBool, err := strconv.ParseBool(useVulnerable)
+		if err != nil {
+			// caller went crazy, don't use this unless you're careful
+			panic(err)
+		}
+		if useVulnerableBool {
+			// This annotation prompts the service ca operator to inject
+			// the vulnerable, legacy service ca bundle into the configmap.
+ injectionAnnotation = map[string]string{ + "service.alpha.openshift.io/inject-vulnerable-legacy-cabundle": "true", + } + } + }) + + return injectionAnnotation +} + +func (c *Publisher) syncNamespace(ns string) (err error) { + startTime := time.Now() + defer func() { + recordMetrics(startTime, ns, err) + klog.V(4).Infof("Finished syncing namespace %q (%v)", ns, time.Since(startTime)) + }() + + annotations := getInjectionAnnotation() + + cm, err := c.cmLister.ConfigMaps(ns).Get(ServiceCACertConfigMapName) + switch { + case apierrors.IsNotFound(err): + _, err = c.client.CoreV1().ConfigMaps(ns).Create(context.TODO(), &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ServiceCACertConfigMapName, + Annotations: annotations, + }, + // Create new configmaps with the field referenced by the default + // projected volume. This ensures that pods - including the pod for + // service ca operator - will be able to start during initial + // deployment before the service ca operator has responded to the + // injection annotation. + Data: map[string]string{ + "service-ca.crt": "", + }, + }, metav1.CreateOptions{}) + // don't retry a create if the namespace doesn't exist or is terminating + if apierrors.IsNotFound(err) || apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) { + return nil + } + return err + case err != nil: + return err + } + + if reflect.DeepEqual(cm.Annotations, annotations) { + return nil + } + + // copy so we don't modify the cache's instance of the configmap + cm = cm.DeepCopy() + cm.Annotations = annotations + + _, err = c.client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{}) + return err +} + +func convertToCM(obj interface{}) (*v1.ConfigMap, error) { + cm, ok := obj.(*v1.ConfigMap) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + return nil, fmt.Errorf("couldn't get object from tombstone %#v", obj) + } + cm, ok = tombstone.Obj.(*v1.ConfigMap) + if !ok { + return nil, fmt.Errorf("tombstone contained object that is not a ConfigMap %#v", obj) + } + } + return cm, nil +} diff --git a/openshift-kube-controller-manager/servicecacertpublisher/publisher_test.go b/openshift-kube-controller-manager/servicecacertpublisher/publisher_test.go new file mode 100644 index 0000000000000..23373a555e304 --- /dev/null +++ b/openshift-kube-controller-manager/servicecacertpublisher/publisher_test.go @@ -0,0 +1,161 @@ +package servicecacertpublisher + +import ( + "reflect" + "testing" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/kubernetes/pkg/controller" +) + +func TestConfigMapCreation(t *testing.T) { + ns := metav1.NamespaceDefault + + caConfigMap := defaultCrtConfigMapPtr() + addAnnotationCM := defaultCrtConfigMapPtr() + addAnnotationCM.Annotations["test"] = "test" + modifyAnnotationCM := defaultCrtConfigMapPtr() + modifyAnnotationCM.Annotations["service.beta.openshift.io/inject-cabundle"] = "no" + otherConfigMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "other", + Namespace: ns, + ResourceVersion: "1", + }, + } + updateOtherConfigMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "other", + Namespace: ns, + ResourceVersion: "1", + Annotations: map[string]string{"test": "true"}, + }, + } + + existNS := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: ns}, + Status: v1.NamespaceStatus{ + Phase: v1.NamespaceActive, + }, + } + newNs := &v1.Namespace{ + 
ObjectMeta: metav1.ObjectMeta{Name: "new"},
+		Status: v1.NamespaceStatus{
+			Phase: v1.NamespaceActive,
+		},
+	}
+	terminatingNS := &v1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{Name: ns},
+		Status: v1.NamespaceStatus{
+			Phase: v1.NamespaceTerminating,
+		},
+	}
+
+	type action struct {
+		verb string
+		name string
+	}
+	testcases := map[string]struct {
+		ExistingConfigMaps []*v1.ConfigMap
+		AddedNamespace     *v1.Namespace
+		UpdatedNamespace   *v1.Namespace
+		DeletedConfigMap   *v1.ConfigMap
+		UpdatedConfigMap   *v1.ConfigMap
+		ExpectActions      []action
+	}{
+		"create new namespace": {
+			AddedNamespace: newNs,
+			ExpectActions:  []action{{verb: "create", name: ServiceCACertConfigMapName}},
+		},
+		"delete other configmap": {
+			ExistingConfigMaps: []*v1.ConfigMap{otherConfigMap, caConfigMap},
+			DeletedConfigMap:   otherConfigMap,
+		},
+		"delete ca configmap": {
+			ExistingConfigMaps: []*v1.ConfigMap{otherConfigMap, caConfigMap},
+			DeletedConfigMap:   caConfigMap,
+			ExpectActions:      []action{{verb: "create", name: ServiceCACertConfigMapName}},
+		},
+		"update ca configmap with adding annotation": {
+			ExistingConfigMaps: []*v1.ConfigMap{caConfigMap},
+			UpdatedConfigMap:   addAnnotationCM,
+			ExpectActions:      []action{{verb: "update", name: ServiceCACertConfigMapName}},
+		},
+		"update ca configmap with modifying annotation": {
+			ExistingConfigMaps: []*v1.ConfigMap{caConfigMap},
+			UpdatedConfigMap:   modifyAnnotationCM,
+			ExpectActions:      []action{{verb: "update", name: ServiceCACertConfigMapName}},
+		},
+		"update with other configmap": {
+			ExistingConfigMaps: []*v1.ConfigMap{caConfigMap, otherConfigMap},
+			UpdatedConfigMap:   updateOtherConfigMap,
+		},
+		"update namespace with terminating state": {
+			UpdatedNamespace: terminatingNS,
+		},
+	}
+
+	for k, tc := range testcases {
+		t.Run(k, func(t *testing.T) {
+			client := fake.NewSimpleClientset(caConfigMap, existNS)
+			informers := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc())
+			cmInformer := informers.Core().V1().ConfigMaps()
+			nsInformer := informers.Core().V1().Namespaces()
+			controller, err := NewPublisher(cmInformer, nsInformer, client)
+			if err != nil {
+				t.Fatalf("error creating controller: %v", err)
+			}
+
+			cmStore := cmInformer.Informer().GetStore()
+
+			controller.syncHandler = controller.syncNamespace
+
+			for _, s := range tc.ExistingConfigMaps {
+				cmStore.Add(s)
+			}
+
+			if tc.AddedNamespace != nil {
+				controller.namespaceAdded(tc.AddedNamespace)
+			}
+			if tc.UpdatedNamespace != nil {
+				controller.namespaceUpdated(nil, tc.UpdatedNamespace)
+			}
+
+			if tc.DeletedConfigMap != nil {
+				cmStore.Delete(tc.DeletedConfigMap)
+				controller.configMapDeleted(tc.DeletedConfigMap)
+			}
+
+			if tc.UpdatedConfigMap != nil {
+				cmStore.Add(tc.UpdatedConfigMap)
+				controller.configMapUpdated(nil, tc.UpdatedConfigMap)
+			}
+
+			for controller.queue.Len() != 0 {
+				controller.processNextWorkItem()
+			}
+
+			// Every write this controller issues targets the service CA configmap,
+			// so it is enough to compare verbs (plus the constant configmap name)
+			// against the expected actions.
+			var gotActions []action
+			for _, a := range client.Actions() {
+				gotActions = append(gotActions, action{verb: a.GetVerb(), name: ServiceCACertConfigMapName})
+			}
+			if !reflect.DeepEqual(gotActions, tc.ExpectActions) {
+				t.Errorf("Unexpected actions:\n%s", diff.ObjectGoPrintDiff(gotActions, tc.ExpectActions))
+			}
+		})
+	}
+}
+
+func defaultCrtConfigMapPtr() *v1.ConfigMap {
+	tmp := v1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: ServiceCACertConfigMapName,
+			Annotations: map[string]string{
+				"service.beta.openshift.io/inject-cabundle": "true",
+			},
+		},
+	}
+	tmp.Namespace = metav1.NamespaceDefault
+	return &tmp
+}
diff --git a/openshift.spec b/openshift.spec
new file mode 100644
index 0000000000000..622f809dc9ca1
--- /dev/null
+++ b/openshift.spec
@@ -0,0 +1,180 @@
+#debuginfo not
supported with Go +%global debug_package %{nil} +# modifying the Go binaries breaks the DWARF debugging +%global __os_install_post %{_rpmconfigdir}/brp-compress + +%global gopath %{_datadir}/gocode +%global import_path k8s.io/kubernetes + +%global golang_version 1.15 + +%{!?commit: +# DO NOT MODIFY: the value on the line below is sed-like replaced by openshift/doozer +%global commit 86b5e46426ba828f49195af21c56f7c6674b48f7 +} +%global shortcommit %(c=%{commit}; echo ${c:0:7}) +# DO NOT MODIFY: the value on the line below is sed-like replaced by openshift/doozer +%{!?os_git_vars: +%global os_git_vars OS_GIT_VERSION='' OS_GIT_COMMIT='' OS_GIT_MAJOR='' OS_GIT_MINOR='' OS_GIT_TREE_STATE='' +} + +%if 0%{?skip_build} +%global do_build 0 +%else +%global do_build 1 +%endif +%if 0%{?skip_prep} +%global do_prep 0 +%else +%global do_prep 1 +%endif +%if 0%{?skip_dist} +%global package_dist %{nil} +%else +%global package_dist %{dist} +%endif + +%{!?version: %global version 4.0.0} +%{!?release: %global release 1} + +Name: openshift +Version: %{version} +Release: %{release}%{package_dist} +Summary: Open Source Container Management by Red Hat +License: ASL 2.0 +URL: https://%{import_path} + +# If go_arches not defined fall through to implicit golang archs +%if 0%{?go_arches:1} +ExclusiveArch: %{go_arches} +%else +ExclusiveArch: x86_64 aarch64 ppc64le s390x +%endif + +# TODO(marun) tar archives are no longer published for 4.x. Should this value be removed? +Source0: https://%{import_path}/archive/%{commit}/%{name}-%{version}.tar.gz +BuildRequires: systemd +BuildRequires: bsdtar +BuildRequires: golang >= %{golang_version} +BuildRequires: krb5-devel +BuildRequires: rsync + +%description +OpenShift is a distribution of Kubernetes optimized for enterprise application +development and deployment. OpenShift adds developer and operational centric +tools on top of Kubernetes to enable rapid application development, easy +deployment and scaling, and long-term lifecycle maintenance for small and large +teams and applications. It provides a secure and multi-tenant configuration for +Kubernetes allowing you to safely host many different applications and workloads +on a unified cluster. 
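+# Editor's illustrative example, not part of the original change: a local
+# build of this spec (assuming an rpm toolchain and a staged source tree)
+# would look roughly like
+#   rpmbuild -ba openshift.spec --define 'version 4.19.0' --define 'release 1'
+# since both version and release fall back to defaults via the
+# %{!?version:...} and %{!?release:...} guards above.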
+ +%package hyperkube +Summary: OpenShift Kubernetes server commands, via deps +Requires: kube-scheduler = %{version} +Requires: kube-kubelet = %{version} +Requires: kube-controller-manager = %{version} +Requires: kube-apiserver = %{version} +Provides: hyperkube = %{version} +Obsoletes: atomic-openshift-hyperkube <= %{version} +Obsoletes: atomic-openshift-node <= %{version} + +%package kube-scheduler +Summary: OpenShift Kubernetes Scheduler +Provides: kube-scheduler = %{version} + +%package kubelet +Summary: OpenShift Kubernetes Kubelet +Requires: util-linux +Requires: socat +Requires: iptables +Provides: kube-kubelet = %{version} + +%package kube-controller-manager +Summary: OpenShift Kubernetes Controller Manager +Provides: kube-controller-manager = %{version} + +%package kube-apiserver +Summary: OpenShift Kubernetes API Server +Provides: kube-apiserver = %{version} + +%description hyperkube +%{summary} + +%description kube-scheduler +%{summary} + +%description kubelet +%{summary} + +%description kube-controller-manager +%{summary} + +%description kube-apiserver +%{summary} + +%prep +%if 0%{do_prep} +%setup -q +%endif + +%build +%if 0%{do_build} +# Create Binaries only for building arch +%ifarch x86_64 + BUILD_PLATFORM="linux/amd64" +%endif +%ifarch ppc64le + BUILD_PLATFORM="linux/ppc64le" +%endif +%ifarch %{arm} aarch64 + BUILD_PLATFORM="linux/arm64" +%endif +%ifarch s390x + BUILD_PLATFORM="linux/s390x" +%endif +KUBE_BUILD_PLATFORMS="${BUILD_PLATFORM}" %{os_git_vars} make all WHAT='cmd/kube-apiserver cmd/kube-controller-manager cmd/kube-scheduler cmd/kubelet' +%endif + +%install + +PLATFORM="$(go env GOHOSTOS)/$(go env GOHOSTARCH)" +install -d %{buildroot}%{_bindir} +install -d %{buildroot}%{_sysctldir} + +# Install linux components +for bin in kube-apiserver kube-controller-manager kube-scheduler kubelet +do + echo "+++ INSTALLING ${bin}" + install -p -m 755 _output/local/bin/${PLATFORM}/${bin} %{buildroot}%{_bindir}/${bin} +done + +install -p -m 755 openshift-hack/images/hyperkube/hyperkube %{buildroot}%{_bindir}/hyperkube +install -p -m 755 openshift-hack/images/hyperkube/kubensenter %{buildroot}%{_bindir}/kubensenter +install -p -m 755 openshift-hack/sysctls/50-kubelet.conf %{buildroot}%{_sysctldir}/50-kubelet.conf + +%post kubelet +%sysctl_apply 50-kubelet.conf + +%files hyperkube +%license LICENSE +%{_bindir}/hyperkube +%defattr(-,root,root,0700) + +%files kubelet +%{_bindir}/kubelet +%{_bindir}/kubensenter +%{_sysctldir}/50-kubelet.conf +%defattr(-,root,root,0700) + +%files kube-scheduler +%{_bindir}/kube-scheduler + +%files kube-controller-manager +%{_bindir}/kube-controller-manager + +%files kube-apiserver +%{_bindir}/kube-apiserver + + + +%changelog diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 694896ee750c6..15a5f489052b8 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -6841,7 +6841,12 @@ func ValidateSecret(secret *core.Secret) field.ErrorList { func ValidateSecretUpdate(newSecret, oldSecret *core.Secret) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...) 
+	// TODO: this is a short term fix, we can drop this patch once we
+	// migrate all of the affected secret objects to the intended type,
+	// see https://issues.redhat.com/browse/API-1800
+	if !openShiftValidateSecretUpdateIsTypeMutationAllowed(newSecret, oldSecret) {
+		allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...)
+	}
 
 	if oldSecret.Immutable != nil && *oldSecret.Immutable {
 		if newSecret.Immutable == nil || !*newSecret.Immutable {
 			allErrs = append(allErrs, field.Forbidden(field.NewPath("immutable"), "field is immutable when `immutable` is set"))
diff --git a/pkg/apis/core/validation/validation_patch.go b/pkg/apis/core/validation/validation_patch.go
new file mode 100644
index 0000000000000..237c4d3694922
--- /dev/null
+++ b/pkg/apis/core/validation/validation_patch.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"k8s.io/kubernetes/pkg/apis/core"
+)
+
+var (
+	// we have multiple controllers reconciling the same secret,
+	// resulting in unexpected outcomes such as the generation of new key pairs.
+	// our goal is to prevent the generation of new key pairs by disallowing
+	// deletions and permitting only updates, which appear to be 'safe'.
+	//
+	// thus we make an exception for the secrets in the following namespaces: during update
+	// we allow the secret type to mutate from
+	// ["SecretTypeTLS", core.SecretTypeOpaque] -> "kubernetes.io/tls"
+	// some of our operators were accidentally creating secrets of type
+	// "SecretTypeTLS", and this patch enables us to move these secret
+	// objects to the intended type in a ratcheting manner.
+	//
+	// we can drop this patch when we migrate all of the affected secret
+	// objects to the intended type: https://issues.redhat.com/browse/API-1800
+	whitelist = map[string]struct{}{
+		"openshift-kube-apiserver-operator":          {},
+		"openshift-kube-apiserver":                   {},
+		"openshift-kube-controller-manager-operator": {},
+		"openshift-config-managed":                   {},
+	}
+)
+
+func openShiftValidateSecretUpdateIsTypeMutationAllowed(newSecret, oldSecret *core.Secret) bool {
+	// initially, this check was stricter.
+	// however, due to the platform's long history (spanning several years)
+	// and the complexity of ensuring that resources were consistently created with only one type,
+	// it is now permissible for the (SecretTypeTLS, core.SecretTypeOpaque) types to transition to "kubernetes.io/tls".
+	//
+	// additionally, it should be noted that default values might also be applied in some cases.
+ // (https://github.com/openshift/kubernetes/blob/258f1d5fb6491ba65fd8201c827e179432430627/pkg/apis/core/v1/defaults.go#L280-L284) + if isOldSecretTypeMutationAllowed(oldSecret) && newSecret.Type == core.SecretTypeTLS { + if _, ok := whitelist[oldSecret.Namespace]; ok { + return true + } + } + return false +} + +func isOldSecretTypeMutationAllowed(oldSecret *core.Secret) bool { + // core.SecretTypeOpaque seems safe because + // https://github.com/kubernetes/kubernetes/blob/8628c3c4da6746b1dc967cc520b189a04ebd78d1/pkg/apis/core/validation/validation.go#L6393 + // + // "SecretTypeTLS" is what kas-o used + return oldSecret.Type == core.SecretTypeOpaque || oldSecret.Type == "SecretTypeTLS" +} diff --git a/pkg/apis/core/validation/validation_patch_test.go b/pkg/apis/core/validation/validation_patch_test.go new file mode 100644 index 0000000000000..4bd34659f4c64 --- /dev/null +++ b/pkg/apis/core/validation/validation_patch_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core" +) + +func TestOpenShiftValidateSecretUpdate(t *testing.T) { + newSecretFn := func(ns, name string, secretType core.SecretType) *core.Secret { + return &core.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + ResourceVersion: "1", + }, + Type: secretType, + Data: map[string][]byte{ + "tls.key": []byte("foo"), + "tls.crt": []byte("bar"), + }, + } + } + invalidTypeErrFn := func(secretType core.SecretType) field.ErrorList { + return field.ErrorList{ + field.Invalid(field.NewPath("type"), secretType, "field is immutable"), + } + } + tlsKeyRequiredErrFn := func() field.ErrorList { + return field.ErrorList{ + field.Required(field.NewPath("data").Key(core.TLSCertKey), ""), + field.Required(field.NewPath("data").Key(core.TLSPrivateKeyKey), ""), + } + } + + for _, secretType := range []core.SecretType{"SecretTypeTLS", core.SecretTypeOpaque} { + for key := range whitelist { + ns, name := key, "foo" + t.Run(fmt.Sprintf("verify whitelist, key = %v, secretType = %v", key, secretType), func(t *testing.T) { + // exercise a valid type mutation: "secretType" -> "kubernetes.io/tls" + oldSecret, newSecret := newSecretFn(ns, name, secretType), newSecretFn(ns, name, core.SecretTypeTLS) + if errs := ValidateSecretUpdate(newSecret, oldSecret); len(errs) > 0 { + t.Errorf("unexpected error: %v", errs) + } + + // the reverse should not be allowed + errExpected := invalidTypeErrFn(secretType) + oldSecret, newSecret = newSecretFn(ns, name, core.SecretTypeTLS), newSecretFn(ns, name, secretType) + if errGot := ValidateSecretUpdate(newSecret, oldSecret); !cmp.Equal(errExpected, errGot) { + t.Errorf("expected error: %v, diff: %s", errExpected, cmp.Diff(errExpected, errGot)) + } + + // no type change, no validation failure expected + oldSecret, newSecret = 
newSecretFn(ns, name, core.SecretTypeTLS), newSecretFn(ns, name, core.SecretTypeTLS)
+				if errs := ValidateSecretUpdate(newSecret, oldSecret); len(errs) > 0 {
+					t.Errorf("unexpected error: %v", errs)
+				}
+
+				// exercise an invalid type mutation, we expect validation failure
+				errExpected = invalidTypeErrFn(core.SecretTypeTLS)
+				oldSecret, newSecret = newSecretFn(ns, name, "AnyOtherType"), newSecretFn(ns, name, core.SecretTypeTLS)
+				if errGot := ValidateSecretUpdate(newSecret, oldSecret); !cmp.Equal(errExpected, errGot) {
+					t.Errorf("expected error: %v, diff: %s", errExpected, cmp.Diff(errExpected, errGot))
+				}
+
+				// verify that kubernetes.io/tls validations are enforced
+				errExpected = tlsKeyRequiredErrFn()
+				oldSecret, newSecret = newSecretFn(ns, name, secretType), newSecretFn(ns, name, core.SecretTypeTLS)
+				newSecret.Data = nil
+				if errGot := ValidateSecretUpdate(newSecret, oldSecret); !cmp.Equal(errExpected, errGot) {
+					t.Errorf("expected error: %v, diff: %s", errExpected, cmp.Diff(errExpected, errGot))
+				}
+			})
+		}
+	}
+
+	// we must not break secrets that are not in the whitelist
+	tests := []struct {
+		name        string
+		oldSecret   *core.Secret
+		newSecret   *core.Secret
+		errExpected field.ErrorList
+	}{
+		{
+			name:        "secret is not whitelisted, valid type transition, update not allowed",
+			oldSecret:   newSecretFn("foo", "bar", "SecretTypeTLS"),
+			newSecret:   newSecretFn("foo", "bar", core.SecretTypeTLS),
+			errExpected: invalidTypeErrFn(core.SecretTypeTLS),
+		},
+		{
+			name:        "secret is not whitelisted, invalid type transition, update not allowed",
+			oldSecret:   newSecretFn("foo", "bar", "SecretTypeTLS"),
+			newSecret:   newSecretFn("foo", "bar", core.SecretTypeOpaque),
+			errExpected: invalidTypeErrFn(core.SecretTypeOpaque),
+		},
+		{
+			name:      "secret is not whitelisted, no type transition, update allowed",
+			oldSecret: newSecretFn("foo", "bar", core.SecretTypeTLS),
+			newSecret: newSecretFn("foo", "bar", core.SecretTypeTLS),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if _, ok := whitelist[test.oldSecret.Namespace]; ok {
+				t.Errorf("misconfigured test: secret is in whitelist: %s", test.oldSecret.Namespace)
+				return
+			}
+
+			errGot := ValidateSecretUpdate(test.newSecret, test.oldSecret)
+			if !cmp.Equal(test.errExpected, errGot) {
+				t.Errorf("expected error: %v, diff: %s", test.errExpected, cmp.Diff(test.errExpected, errGot))
+			}
+		})
+	}
+}
diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go
index 0076ca2411a95..07cf0cb537e8b 100644
--- a/pkg/controller/daemon/daemon_controller.go
+++ b/pkg/controller/daemon/daemon_controller.go
@@ -120,7 +120,13 @@ type DaemonSetsController struct {
 	nodeLister corelisters.NodeLister
 	// nodeStoreSynced returns true if the node store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
-	nodeStoreSynced cache.InformerSynced
+	nodeStoreSynced                    cache.InformerSynced
+	namespaceLister                    corelisters.NamespaceLister
+	namespaceStoreSynced               cache.InformerSynced
+	openshiftDefaultNodeSelectorString string
+	openshiftDefaultNodeSelector       labels.Selector
+	kubeDefaultNodeSelectorString      string
+	kubeDefaultNodeSelector            labels.Selector
 
 	// DaemonSet keys that need to be synced.
queue workqueue.TypedRateLimitingInterface[string] @@ -297,6 +303,11 @@ func (dsc *DaemonSetsController) Run(ctx context.Context, workers int) { if !cache.WaitForNamedCacheSync("daemon sets", ctx.Done(), dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.historyStoreSynced, dsc.dsStoreSynced) { return } + if dsc.namespaceStoreSynced != nil { + if !cache.WaitForNamedCacheSync("daemon sets", ctx.Done(), dsc.namespaceStoreSynced) { + return + } + } for i := 0; i < workers; i++ { go wait.UntilWithContext(ctx, dsc.runWorker, time.Second) @@ -651,7 +662,7 @@ func (dsc *DaemonSetsController) addNode(logger klog.Logger, obj interface{}) { } node := obj.(*v1.Node) for _, ds := range dsList { - if shouldRun, _ := NodeShouldRunDaemonPod(node, ds); shouldRun { + if shouldRun, _ := dsc.nodeShouldRunDaemonPod(node, ds); shouldRun { dsc.enqueueDaemonSet(ds) } } @@ -678,9 +689,8 @@ func (dsc *DaemonSetsController) updateNode(logger klog.Logger, old, cur interfa } // TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too). for _, ds := range dsList { - // If NodeShouldRunDaemonPod needs to uses other than Labels and Taints (mutable) properties of node, it needs to update shouldIgnoreNodeUpdate. - oldShouldRun, oldShouldContinueRunning := NodeShouldRunDaemonPod(oldNode, ds) - currentShouldRun, currentShouldContinueRunning := NodeShouldRunDaemonPod(curNode, ds) + oldShouldRun, oldShouldContinueRunning := dsc.nodeShouldRunDaemonPod(oldNode, ds) + currentShouldRun, currentShouldContinueRunning := dsc.nodeShouldRunDaemonPod(curNode, ds) if (oldShouldRun != currentShouldRun) || (oldShouldContinueRunning != currentShouldContinueRunning) { dsc.enqueueDaemonSet(ds) } @@ -786,7 +796,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode( hash string, ) (nodesNeedingDaemonPods, podsToDelete []string) { - shouldRun, shouldContinueRunning := NodeShouldRunDaemonPod(node, ds) + shouldRun, shouldContinueRunning := dsc.nodeShouldRunDaemonPod(node, ds) daemonPods, exists := nodeToDaemonPods[node.Name] switch { @@ -1141,7 +1151,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ctx context.Context, ds * var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable int now := dsc.failedPodsBackoff.Clock.Now() for _, node := range nodeList { - shouldRun, _ := NodeShouldRunDaemonPod(node, ds) + shouldRun, _ := dsc.nodeShouldRunDaemonPod(node, ds) scheduled := len(nodeToDaemonPods[node.Name]) > 0 if shouldRun { diff --git a/pkg/controller/daemon/patch_nodeselector.go b/pkg/controller/daemon/patch_nodeselector.go new file mode 100644 index 0000000000000..356437dd52299 --- /dev/null +++ b/pkg/controller/daemon/patch_nodeselector.go @@ -0,0 +1,109 @@ +package daemon + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + appsinformers "k8s.io/client-go/informers/apps/v1" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/flowcontrol" + + projectv1 "github.com/openshift/api/project/v1" +) + +func NewNodeSelectorAwareDaemonSetsController(ctx context.Context, openshiftDefaultNodeSelectorString, kubeDefaultNodeSelectorString string, namepaceInformer coreinformers.NamespaceInformer, daemonSetInformer appsinformers.DaemonSetInformer, historyInformer 
appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface, failedPodsBackoff *flowcontrol.Backoff) (*DaemonSetsController, error) { + controller, err := NewDaemonSetsController(ctx, daemonSetInformer, historyInformer, podInformer, nodeInformer, kubeClient, failedPodsBackoff) + + if err != nil { + return controller, err + } + controller.namespaceLister = namespaceInformer.Lister() + controller.namespaceStoreSynced = namespaceInformer.Informer().HasSynced + controller.openshiftDefaultNodeSelectorString = openshiftDefaultNodeSelectorString + if len(controller.openshiftDefaultNodeSelectorString) > 0 { + controller.openshiftDefaultNodeSelector, err = labels.Parse(controller.openshiftDefaultNodeSelectorString) + if err != nil { + return nil, err + } + } + controller.kubeDefaultNodeSelectorString = kubeDefaultNodeSelectorString + if len(controller.kubeDefaultNodeSelectorString) > 0 { + controller.kubeDefaultNodeSelector, err = labels.Parse(controller.kubeDefaultNodeSelectorString) + if err != nil { + return nil, err + } + } + + return controller, nil +} + +func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *appsv1.DaemonSet) (bool, bool) { + shouldRun, shouldContinueRunning := NodeShouldRunDaemonPod(node, ds) + if shouldRun && shouldContinueRunning { + if matches, matchErr := dsc.namespaceNodeSelectorMatches(node, ds); !matches || matchErr != nil { + return false, false + } + } + + return shouldRun, shouldContinueRunning +} + +func (dsc *DaemonSetsController) namespaceNodeSelectorMatches(node *v1.Node, ds *appsv1.DaemonSet) (bool, error) { + if dsc.namespaceLister == nil { + return true, nil + } + + // this is racy (different listers) and we get to choose which way to fail. This should requeue. + ns, err := dsc.namespaceLister.Get(ds.Namespace) + if apierrors.IsNotFound(err) { + return false, err + } + // if we had any error, default to the safe option of creating a pod for the node.
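+ // (A NotFound above, by contrast, fails closed: without the namespace object its node selector cannot be evaluated, so no pod is created and the error is surfaced so the DaemonSet can be requeued.)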
+ if err != nil { + utilruntime.HandleError(err) + return true, nil + } + + return dsc.nodeSelectorMatches(node, ns), nil +} + +func (dsc *DaemonSetsController) nodeSelectorMatches(node *v1.Node, ns *v1.Namespace) bool { + kubeNodeSelector, ok := ns.Annotations["scheduler.alpha.kubernetes.io/node-selector"] + if !ok { + originNodeSelector, ok := ns.Annotations[projectv1.ProjectNodeSelector] + switch { + case ok: + selector, err := labels.Parse(originNodeSelector) + if err == nil { + if !selector.Matches(labels.Set(node.Labels)) { + return false + } + } + case !ok && len(dsc.openshiftDefaultNodeSelectorString) > 0: + if !dsc.openshiftDefaultNodeSelector.Matches(labels.Set(node.Labels)) { + return false + } + } + } + + switch { + case ok: + selector, err := labels.Parse(kubeNodeSelector) + if err == nil { + if !selector.Matches(labels.Set(node.Labels)) { + return false + } + } + case !ok && len(dsc.kubeDefaultNodeSelectorString) > 0: + if !dsc.kubeDefaultNodeSelector.Matches(labels.Set(node.Labels)) { + return false + } + } + + return true +} diff --git a/pkg/controller/daemon/patch_nodeselector_test.go b/pkg/controller/daemon/patch_nodeselector_test.go new file mode 100644 index 0000000000000..6553fe694ea0a --- /dev/null +++ b/pkg/controller/daemon/patch_nodeselector_test.go @@ -0,0 +1,186 @@ +package daemon + +import ( + "testing" + + projectv1 "github.com/openshift/api/project/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +func TestNamespaceNodeSelectorMatches(t *testing.T) { + nodes := []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "first", + Labels: map[string]string{ + "alpha": "bravo", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "second", + Labels: map[string]string{ + "charlie": "delta", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "third", + Labels: map[string]string{ + "echo": "foxtrot", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fourth", + Labels: map[string]string{}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fifth", + Labels: map[string]string{ + "charlie": "delta", + "echo": "foxtrot", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "sixth", + Labels: map[string]string{ + "alpha": "bravo", + "charlie": "delta", + "echo": "foxtrot", + }, + }, + }, + } + + pureDefault := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{}, + } + all := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + projectv1.ProjectNodeSelector: "", + }, + }, + } + projectSpecified := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + projectv1.ProjectNodeSelector: "echo=foxtrot", + }, + }, + } + schedulerSpecified := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "scheduler.alpha.kubernetes.io/node-selector": "charlie=delta", + }, + }, + } + bothSpecified := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + projectv1.ProjectNodeSelector: "echo=foxtrot", + "scheduler.alpha.kubernetes.io/node-selector": "charlie=delta", + }, + }, + } + + tests := []struct { + name string + defaultSelector string + namespace *v1.Namespace + expected map[string]bool + }{ + { + name: "pure-default", + defaultSelector: "alpha=bravo", + namespace: pureDefault, + expected: map[string]bool{ + "first": true, + "sixth": true, + }, + }, + { + name: "all", + defaultSelector: "alpha=bravo", + namespace: all, + expected: map[string]bool{ + "first": true, + 
"second": true, + "third": true, + "fourth": true, + "fifth": true, + "sixth": true, + }, + }, + { + name: "pure-default-without-default", + namespace: pureDefault, + expected: map[string]bool{ + "first": true, + "second": true, + "third": true, + "fourth": true, + "fifth": true, + "sixth": true, + }, + }, + { + name: "projectSpecified", + namespace: projectSpecified, + expected: map[string]bool{ + "third": true, + "fifth": true, + "sixth": true, + }, + }, + { + name: "schedulerSpecified", + namespace: schedulerSpecified, + expected: map[string]bool{ + "second": true, + "fifth": true, + "sixth": true, + }, + }, + { + name: "bothSpecified", + namespace: bothSpecified, + expected: map[string]bool{ + "second": true, + "fifth": true, + "sixth": true, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + c := &DaemonSetsController{} + c.openshiftDefaultNodeSelectorString = test.defaultSelector + if len(c.openshiftDefaultNodeSelectorString) > 0 { + var err error + c.openshiftDefaultNodeSelector, err = labels.Parse(c.openshiftDefaultNodeSelectorString) + if err != nil { + t.Fatal(err) + } + } + + for _, node := range nodes { + if e, a := test.expected[node.Name], c.nodeSelectorMatches(node, test.namespace); e != a { + t.Errorf("%q expected %v, got %v", node.Name, e, a) + } + } + }) + } +} diff --git a/pkg/controller/serviceaccount/tokens_controller.go b/pkg/controller/serviceaccount/tokens_controller.go index eb430b2d723f9..3d05c894790f1 100644 --- a/pkg/controller/serviceaccount/tokens_controller.go +++ b/pkg/controller/serviceaccount/tokens_controller.go @@ -41,6 +41,8 @@ import ( "k8s.io/kubernetes/pkg/serviceaccount" ) +const ServiceServingCASecretKey = "service-ca.crt" + // RemoveTokenBackoff is the recommended (empirical) retry interval for removing // a secret reference from a service account when the secret is deleted. It is // exported for use by custom secret controllers. @@ -66,6 +68,9 @@ type TokensControllerOptions struct { // MaxRetries controls the maximum number of times a particular key is retried before giving up // If zero, a default max is used MaxRetries int + + // This CA will be added in the secrets of service accounts + ServiceServingCA []byte } // NewTokensController returns a new *TokensController. 
@@ -76,9 +81,10 @@ func NewTokensController(serviceAccounts informers.ServiceAccountInformer, secre } e := &TokensController{ - client: cl, - token: options.TokenGenerator, - rootCA: options.RootCA, + client: cl, + token: options.TokenGenerator, + rootCA: options.RootCA, + serviceServingCA: options.ServiceServingCA, syncServiceAccountQueue: workqueue.NewTypedRateLimitingQueueWithConfig( workqueue.DefaultTypedControllerRateLimiter[serviceAccountQueueKey](), @@ -134,7 +140,8 @@ type TokensController struct { client clientset.Interface token serviceaccount.TokenGenerator - rootCA []byte + rootCA []byte + serviceServingCA []byte serviceAccounts listersv1.ServiceAccountLister // updatedSecrets is a wrapper around the shared cache which allows us to record @@ -352,22 +359,23 @@ func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry return true, err } -func (e *TokensController) secretUpdateNeeded(secret *v1.Secret) (bool, bool, bool) { +func (e *TokensController) secretUpdateNeeded(secret *v1.Secret) (bool, bool, bool, bool) { caData := secret.Data[v1.ServiceAccountRootCAKey] needsCA := len(e.rootCA) > 0 && !bytes.Equal(caData, e.rootCA) + needsServiceServingCA := len(e.serviceServingCA) > 0 && !bytes.Equal(secret.Data[ServiceServingCASecretKey], e.serviceServingCA) needsNamespace := len(secret.Data[v1.ServiceAccountNamespaceKey]) == 0 tokenData := secret.Data[v1.ServiceAccountTokenKey] needsToken := len(tokenData) == 0 - return needsCA, needsNamespace, needsToken + return needsCA, needsServiceServingCA, needsNamespace, needsToken } // generateTokenIfNeeded populates the token data for the given Secret if not already set func (e *TokensController) generateTokenIfNeeded(logger klog.Logger, serviceAccount *v1.ServiceAccount, cachedSecret *v1.Secret) ( /* retry */ bool, error) { // Check the cached secret to see if changes are needed - if needsCA, needsNamespace, needsToken := e.secretUpdateNeeded(cachedSecret); !needsCA && !needsToken && !needsNamespace { + if needsCA, needsServiceServingCA, needsNamespace, needsToken := e.secretUpdateNeeded(cachedSecret); !needsCA && !needsServiceServingCA && !needsToken && !needsNamespace { return false, nil } @@ -386,8 +394,8 @@ func (e *TokensController) generateTokenIfNeeded(logger klog.Logger, serviceAcco return false, nil } - needsCA, needsNamespace, needsToken := e.secretUpdateNeeded(liveSecret) - if !needsCA && !needsToken && !needsNamespace { + needsCA, needsServiceServingCA, needsNamespace, needsToken := e.secretUpdateNeeded(liveSecret) + if !needsCA && !needsServiceServingCA && !needsToken && !needsNamespace { return false, nil } @@ -402,6 +410,9 @@ func (e *TokensController) generateTokenIfNeeded(logger klog.Logger, serviceAcco if needsCA { liveSecret.Data[v1.ServiceAccountRootCAKey] = e.rootCA } + if needsServiceServingCA { + liveSecret.Data[ServiceServingCASecretKey] = e.serviceServingCA + } // Set the namespace if needsNamespace { liveSecret.Data[v1.ServiceAccountNamespaceKey] = []byte(liveSecret.Namespace) diff --git a/pkg/controlplane/apiserver/config.go b/pkg/controlplane/apiserver/config.go index aed7e6866ec5e..66b436d1a3464 100644 --- a/pkg/controlplane/apiserver/config.go +++ b/pkg/controlplane/apiserver/config.go @@ -25,6 +25,11 @@ import ( noopoteltrace "go.opentelemetry.io/otel/trace/noop" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/admissionenablement" + "k8s.io/kubernetes/openshift-kube-apiserver/enablement" + "k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver" + eventstorage
"k8s.io/kubernetes/pkg/registry/core/event/storage" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" utilnet "k8s.io/apimachinery/pkg/util/net" @@ -139,6 +144,8 @@ func BuildGenericConfig( // on a fast local network genericConfig.LoopbackClientConfig.DisableCompression = true + enablement.SetLoopbackClientConfig(genericConfig.LoopbackClientConfig) + kubeClientConfig := genericConfig.LoopbackClientConfig clientgoExternalClient, err := clientgoclientset.NewForConfig(kubeClientConfig) if err != nil { @@ -289,6 +296,13 @@ func CreateConfig( opts.Metrics.Apply() serviceaccount.RegisterMetrics() + var eventStorage *eventstorage.REST + eventStorage, err := eventstorage.NewREST(genericConfig.RESTOptionsGetter, uint64(opts.EventTTL.Seconds())) + if err != nil { + return nil, nil, err + } + genericConfig.EventSink = eventRegistrySink{eventStorage} + config := &Config{ Generic: genericConfig, Extra: Extra{ @@ -360,6 +374,15 @@ func CreateConfig( if err != nil { return nil, nil, fmt.Errorf("failed to create real dynamic external client: %w", err) } + + if err := openshiftkubeapiserver.OpenShiftKubeAPIServerConfigPatch(genericConfig, versionedInformers, &genericInitializers); err != nil { + return nil, nil, fmt.Errorf("failed to patch: %v", err) + } + + if enablement.IsOpenShift() { + admissionenablement.SetAdmissionDefaults(&opts, versionedInformers, clientgoExternalClient) + } + err = opts.Admission.ApplyTo( genericConfig, versionedInformers, diff --git a/pkg/controlplane/apiserver/patch_config.go b/pkg/controlplane/apiserver/patch_config.go new file mode 100644 index 0000000000000..67707dfcbe452 --- /dev/null +++ b/pkg/controlplane/apiserver/patch_config.go @@ -0,0 +1,66 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiserver + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/endpoints/request" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/kubernetes/pkg/apis/core" + v1 "k8s.io/kubernetes/pkg/apis/core/v1" + eventstorage "k8s.io/kubernetes/pkg/registry/core/event/storage" +) + +// eventRegistrySink wraps an event registry in order to be used as direct event sync, without going through the API. 
+type eventRegistrySink struct { + *eventstorage.REST +} + +var _ genericapiserver.EventSink = eventRegistrySink{} + +func (s eventRegistrySink) Create(v1event *corev1.Event) (*corev1.Event, error) { + ctx := request.WithNamespace(request.WithRequestInfo(request.NewContext(), &request.RequestInfo{APIVersion: "v1"}), v1event.Namespace) + // since we are bypassing the API set a hard timeout for the storage layer + ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + defer cancel() + + var event core.Event + if err := v1.Convert_v1_Event_To_core_Event(v1event, &event, nil); err != nil { + return nil, err + } + + obj, err := s.REST.Create(ctx, &event, nil, &metav1.CreateOptions{}) + if err != nil { + return nil, err + } + ret, ok := obj.(*core.Event) + if !ok { + return nil, fmt.Errorf("expected *core.Event, got %T", obj) + } + + var v1ret corev1.Event + if err := v1.Convert_core_Event_To_v1_Event(ret, &v1ret, nil); err != nil { + return nil, err + } + + return &v1ret, nil +} diff --git a/pkg/controlplane/apiserver/server.go b/pkg/controlplane/apiserver/server.go index dfeea62cf8b44..0d84d88e35ed7 100644 --- a/pkg/controlplane/apiserver/server.go +++ b/pkg/controlplane/apiserver/server.go @@ -22,6 +22,8 @@ import ( "os" "time" + "k8s.io/kubernetes/pkg/controlplane/controller/kubernetesservice" + coordinationapiv1 "k8s.io/api/coordination/v1" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -138,6 +140,8 @@ func (c completedConfig) New(name string, delegationTarget genericapiserver.Dele VersionedInformers: c.VersionedInformers, } + kubernetesservice.KubeAPIServerEmitEventFn = s.GenericAPIServer.Eventf + client, err := kubernetes.NewForConfig(s.GenericAPIServer.LoopbackClientConfig) if err != nil { return nil, err diff --git a/pkg/controlplane/controller/crdregistration/crdregistration_controller.go b/pkg/controlplane/controller/crdregistration/crdregistration_controller.go index 578a196aa37e9..4ed4364e9ddb8 100644 --- a/pkg/controlplane/controller/crdregistration/crdregistration_controller.go +++ b/pkg/controlplane/controller/crdregistration/crdregistration_controller.go @@ -33,6 +33,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + "k8s.io/kube-aggregator/pkg/apiserver" ) // AutoAPIServiceRegistration is an interface which callers can re-declare locally and properly cast to for @@ -196,6 +197,14 @@ func (c *crdRegistrationController) enqueueCRD(crd *apiextensionsv1.CustomResour func (c *crdRegistrationController) handleVersionUpdate(groupVersion schema.GroupVersion) error { apiServiceName := groupVersion.Version + "." + groupVersion.Group + if apiserver.APIServiceAlreadyExists(groupVersion) { + // Removing APIService from sync means the CRD registration controller won't sync this APIService + // anymore. If the APIService is managed externally, this will mean the external component can + // update this APIService without CRD controller stomping the changes on it. + c.apiServiceRegistration.RemoveAPIServiceToSync(apiServiceName) + return nil + } + // check all CRDs.
There shouldn't that many, but if we have problems later we can index them crds, err := c.crdLister.List(labels.Everything()) if err != nil { @@ -215,8 +224,8 @@ func (c *crdRegistrationController) handleVersionUpdate(groupVersion schema.Grou Spec: v1.APIServiceSpec{ Group: groupVersion.Group, Version: groupVersion.Version, - GroupPriorityMinimum: 1000, // CRDs should have relatively low priority - VersionPriority: 100, // CRDs will be sorted by kube-like versions like any other APIService with the same VersionPriority + GroupPriorityMinimum: getGroupPriorityMin(groupVersion.Group), // CRDs should have relatively low priority + VersionPriority: 100, // CRDs will be sorted by kube-like versions like any other APIService with the same VersionPriority }, }) return nil diff --git a/pkg/controlplane/controller/crdregistration/patch.go b/pkg/controlplane/controller/crdregistration/patch.go new file mode 100644 index 0000000000000..ea989b6e1ed06 --- /dev/null +++ b/pkg/controlplane/controller/crdregistration/patch.go @@ -0,0 +1,12 @@ +package crdregistration + +func getGroupPriorityMin(group string) int32 { + switch group { + case "config.openshift.io": + return 1100 + case "operator.openshift.io": + return 1080 + default: + return 1000 + } +} diff --git a/pkg/controlplane/controller/kubernetesservice/controller.go b/pkg/controlplane/controller/kubernetesservice/controller.go index d8cfd44222ceb..e5d8c04206b72 100644 --- a/pkg/controlplane/controller/kubernetesservice/controller.go +++ b/pkg/controlplane/controller/kubernetesservice/controller.go @@ -155,6 +155,8 @@ func (c *Controller) Run(ch <-chan struct{}) { return } + KubeAPIServerEmitEventFn(corev1.EventTypeWarning, "KubeAPIReadyz", "readyz=true") + wait.NonSlidingUntil(func() { // Service definition is not reconciled after first // run, ports and type will be corrected only during diff --git a/pkg/controlplane/controller/kubernetesservice/patch.go b/pkg/controlplane/controller/kubernetesservice/patch.go new file mode 100644 index 0000000000000..d78731dda37cb --- /dev/null +++ b/pkg/controlplane/controller/kubernetesservice/patch.go @@ -0,0 +1,5 @@ +package kubernetesservice + +var KubeAPIServerEmitEventFn EventSinkFunc = nil + +type EventSinkFunc func(eventType, reason, messageFmt string, args ...interface{}) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index f9838d90796dc..ebb471c98d592 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -871,6 +871,7 @@ const ( ) func init() { + registerOpenshiftFeatures() runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates)) runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates)) runtime.Must(zpagesfeatures.AddFeatureGates(utilfeature.DefaultMutableFeatureGate)) diff --git a/pkg/features/openshift_features.go b/pkg/features/openshift_features.go new file mode 100644 index 0000000000000..6325606ee8868 --- /dev/null +++ b/pkg/features/openshift_features.go @@ -0,0 +1,15 @@ +package features + +import ( + "k8s.io/component-base/featuregate" +) + +var RouteExternalCertificate featuregate.Feature = "RouteExternalCertificate" + +// registerOpenshiftFeatures injects openshift-specific feature gates +func registerOpenshiftFeatures() { + defaultKubernetesFeatureGates[RouteExternalCertificate] = featuregate.FeatureSpec{ + Default: false, + PreRelease: featuregate.Alpha, + } +} diff --git a/pkg/features/versioned_kube_features.go b/pkg/features/versioned_kube_features.go 
index ecee44abf7b30..1df6ba498235c 100644 --- a/pkg/features/versioned_kube_features.go +++ b/pkg/features/versioned_kube_features.go @@ -311,7 +311,7 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate }, genericfeatures.ResilientWatchCacheInitialization: { - {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, }, genericfeatures.RetryGenerateName: { @@ -546,8 +546,8 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate }, NodeLogQuery: { - {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, - {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, }, NodeSwap: { diff --git a/pkg/kubeapiserver/authorizer/config.go b/pkg/kubeapiserver/authorizer/config.go index 7a778ebd86789..d3b3541646997 100644 --- a/pkg/kubeapiserver/authorizer/config.go +++ b/pkg/kubeapiserver/authorizer/config.go @@ -35,6 +35,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" versionedinformers "k8s.io/client-go/informers" resourceinformers "k8s.io/client-go/informers/resource/v1beta1" + "k8s.io/kubernetes/openshift-kube-apiserver/authorization/scopeauthorizer" "k8s.io/kubernetes/pkg/auth/authorizer/abac" "k8s.io/kubernetes/pkg/auth/nodeidentifier" "k8s.io/kubernetes/pkg/features" @@ -126,6 +127,9 @@ func (config Config) New(ctx context.Context, serverID string) (authorizer.Autho &rbac.ClusterRoleGetter{Lister: config.VersionedInformerFactory.Rbac().V1().ClusterRoles().Lister()}, &rbac.ClusterRoleBindingLister{Lister: config.VersionedInformerFactory.Rbac().V1().ClusterRoleBindings().Lister()}, ) + case authzconfig.AuthorizerType(modes.ModeScope): + // Wrap with an authorizer that detects unsafe requests and modifies verbs/resources appropriately so policy can address them separately + r.scopeLimitedAuthorizer = scopeauthorizer.NewAuthorizer(config.VersionedInformerFactory.Rbac().V1().ClusterRoles().Lister()) } } diff --git a/pkg/kubeapiserver/authorizer/modes/patch.go b/pkg/kubeapiserver/authorizer/modes/patch.go new file mode 100644 index 0000000000000..bc892601ebe6f --- /dev/null +++ b/pkg/kubeapiserver/authorizer/modes/patch.go @@ -0,0 +1,8 @@ +package modes + +var ModeScope = "Scope" +var ModeSystemMasters = "SystemMasters" + +func init() { + AuthorizationModeChoices = append(AuthorizationModeChoices, ModeScope, ModeSystemMasters) +} diff --git a/pkg/kubeapiserver/authorizer/patch.go b/pkg/kubeapiserver/authorizer/patch.go new file mode 100644 index 0000000000000..8a095efcf98d5 --- /dev/null +++ b/pkg/kubeapiserver/authorizer/patch.go @@ -0,0 +1,8 @@ +package authorizer + +var skipSystemMastersAuthorizer = false + +// SkipSystemMastersAuthorizer disables the implicitly added system/master authz, turning it into another authz mode "SystemMasters", to be added via authorization-mode +func SkipSystemMastersAuthorizer() { + skipSystemMastersAuthorizer = true +} diff --git a/pkg/kubeapiserver/authorizer/reload.go b/pkg/kubeapiserver/authorizer/reload.go index 2e3525686bd47..381765d81c36c 100644 --- a/pkg/kubeapiserver/authorizer/reload.go +++ b/pkg/kubeapiserver/authorizer/reload.go @@ -27,6 +27,8 @@ import ( "sync/atomic" "time" + "k8s.io/kubernetes/openshift-kube-apiserver/authorization/browsersafe" +
"k8s.io/apimachinery/pkg/util/sets" authzconfig "k8s.io/apiserver/pkg/apis/apiserver" "k8s.io/apiserver/pkg/authentication/user" @@ -58,10 +60,11 @@ type reloadableAuthorizerResolver struct { reloadInterval time.Duration requireNonWebhookTypes sets.Set[authzconfig.AuthorizerType] - nodeAuthorizer *node.NodeAuthorizer - rbacAuthorizer *rbac.RBACAuthorizer - abacAuthorizer abac.PolicyList - compiler authorizationcel.Compiler // non-nil and shared across reloads. + nodeAuthorizer *node.NodeAuthorizer + rbacAuthorizer *rbac.RBACAuthorizer + scopeLimitedAuthorizer authorizer.Authorizer + abacAuthorizer abac.PolicyList + compiler authorizationcel.Compiler // non-nil and shared across reloads. lastLoadedLock sync.Mutex lastLoadedConfig *authzconfig.AuthorizationConfiguration @@ -94,9 +97,11 @@ func (r *reloadableAuthorizerResolver) newForConfig(authzConfig *authzconfig.Aut ruleResolvers []authorizer.RuleResolver ) - // Add SystemPrivilegedGroup as an authorizing group - superuserAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup) - authorizers = append(authorizers, superuserAuthorizer) + if !skipSystemMastersAuthorizer { + // Add SystemPrivilegedGroup as an authorizing group + superuserAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup) + authorizers = append(authorizers, superuserAuthorizer) + } for _, configuredAuthorizer := range authzConfig.Authorizers { // Keep cases in sync with constant list in k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/modes.go. @@ -161,8 +166,15 @@ func (r *reloadableAuthorizerResolver) newForConfig(authzConfig *authzconfig.Aut if r.rbacAuthorizer == nil { return nil, nil, fmt.Errorf("authorizer type RBAC is not allowed if it was not enabled at initial server startup") } - authorizers = append(authorizers, authorizationmetrics.InstrumentedAuthorizer(string(configuredAuthorizer.Type), configuredAuthorizer.Name, r.rbacAuthorizer)) + // Wrap with an authorizer that detects unsafe requests and modifies verbs/resources appropriately so policy can address them separately + authorizers = append(authorizers, authorizationmetrics.InstrumentedAuthorizer(string(configuredAuthorizer.Type), configuredAuthorizer.Name, browsersafe.NewBrowserSafeAuthorizer(r.rbacAuthorizer, user.AllAuthenticated))) ruleResolvers = append(ruleResolvers, r.rbacAuthorizer) + case authzconfig.AuthorizerType(modes.ModeScope): + // Wrap with an authorizer that detects unsafe requests and modifies verbs/resources appropriately so policy can address them separately + authorizers = append(authorizers, browsersafe.NewBrowserSafeAuthorizer(r.scopeLimitedAuthorizer, user.AllAuthenticated)) + case authzconfig.AuthorizerType(modes.ModeSystemMasters): + // no browsersafeauthorizer here becase that rewrites the resources. This authorizer matches no matter which resource matches. 
+ authorizers = append(authorizers, authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup)) default: return nil, nil, fmt.Errorf("unknown authorization mode %s specified", configuredAuthorizer.Type) } diff --git a/pkg/kubelet/DOWNSTREAM_OWNERS b/pkg/kubelet/DOWNSTREAM_OWNERS new file mode 100644 index 0000000000000..d484fa4fc246a --- /dev/null +++ b/pkg/kubelet/DOWNSTREAM_OWNERS @@ -0,0 +1,17 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Downstream reviewers, don't have to match those in OWNERS +reviewers: + - rphillips + - sjenning + - mrunalp + +# Sub-package approvers from upstream with permission to approve downstream backports following these rules: +# - they MUST be approvers upstream (here compare https://github.com/kubernetes/kubernetes/blob/17bb2fc050ec786b60db7d8d6d4d3ac8eeac205b/pkg/kubelet/OWNERS#L10-L11) +# - they may approve "UPSTREAM: : ..." changes that merged upstream. +# - carry patches for "UPSTREAM: : ..." and any unmerged PRs of the previous kind will have to be approved by the top-level approvers. +approvers: + - sjenning + - mrunalp + +component: node diff --git a/pkg/kubelet/apis/config/validation/validation_test.go b/pkg/kubelet/apis/config/validation/validation_test.go index 802c5506d37ce..fdfe74c8acc3e 100644 --- a/pkg/kubelet/apis/config/validation/validation_test.go +++ b/pkg/kubelet/apis/config/validation/validation_test.go @@ -389,6 +389,7 @@ func TestValidateKubeletConfiguration(t *testing.T) { conf.CrashLoopBackOff = kubeletconfig.CrashLoopBackOffConfig{ MaxContainerRestartPeriod: &metav1.Duration{Duration: 0 * time.Second}, } + return conf }, errMsg: "invalid configuration: CrashLoopBackOff.MaxContainerRestartPeriod (got: 0 seconds) must be set between 1s and 300s", @@ -613,6 +614,7 @@ func TestValidateKubeletConfiguration(t *testing.T) { }, { name: "enableSystemLogQuery is enabled without NodeLogQuery feature gate", configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration { + conf.FeatureGates = map[string]bool{"NodeLogQuery": false} conf.EnableSystemLogQuery = true return conf }, @@ -720,7 +722,9 @@ func TestValidateKubeletConfiguration(t *testing.T) { conf.FeatureGates["invalid"] = true return conf }, - errMsg: "unrecognized feature gate: invalid", + // In OpenShift we need to tolerate unrecognized feature gates + // errMsg: "unrecognized feature gate: invalid", + errMsg: "", }, } diff --git a/pkg/kubelet/cadvisor/cadvisor_linux.go b/pkg/kubelet/cadvisor/cadvisor_linux.go index c2756c6f12334..a99452a0d2f1c 100644 --- a/pkg/kubelet/cadvisor/cadvisor_linux.go +++ b/pkg/kubelet/cadvisor/cadvisor_linux.go @@ -59,10 +59,17 @@ const defaultHousekeepingInterval = 10 * time.Second const allowDynamicHousekeeping = true func init() { + maxHouseKeeping := maxHousekeepingInterval.String() + if value := os.Getenv("OPENSHIFT_MAX_HOUSEKEEPING_INTERVAL_DURATION"); value != "" { + klog.Infof("Detected OPENSHIFT_MAX_HOUSEKEEPING_INTERVAL_DURATION: %v", value) + maxHouseKeeping = value + } // Override cAdvisor flag defaults. flagOverrides := map[string]string{ // Override the default cAdvisor housekeeping interval. "housekeeping_interval": defaultHousekeepingInterval.String(), + // Override the default max cAdvisor housekeeping interval. + "max_housekeeping_interval": maxHouseKeeping, // Disable event storage by default. 
"event_storage_event_limit": "default=0", "event_storage_age_limit": "default=0", diff --git a/pkg/kubelet/cm/cgroup_manager_linux.go b/pkg/kubelet/cm/cgroup_manager_linux.go index 6bcb1298c15a9..3da3b2ddb0de1 100644 --- a/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/pkg/kubelet/cm/cgroup_manager_linux.go @@ -25,6 +25,7 @@ import ( "sync" "time" + "github.com/opencontainers/runc/libcontainer/cgroups" libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" libcontainercgroupmanager "github.com/opencontainers/runc/libcontainer/cgroups/manager" @@ -146,6 +147,10 @@ type cgroupCommon struct { // useSystemd tells if systemd cgroup manager should be used. useSystemd bool + + // cpuLoadBalanceDisable tells whether kubelet should disable + // cpu load balancing on new cgroups it creates. + cpuLoadBalanceDisable bool } // Make sure that cgroupV1impl and cgroupV2impl implement the CgroupManager interface @@ -390,6 +395,25 @@ func (m *cgroupCommon) Create(cgroupConfig *CgroupConfig) error { return err } + // Disable cpuset.sched_load_balance for all cgroups Kubelet creates. + // This way, CRI can disable sched_load_balance for pods that must have load balance + // disabled, but the slices can contain all cpus (as the guaranteed cpus are known dynamically). + // Note: this should be done before Apply(-1) below, as Apply contains cpusetCopyIfNeeded(), which will + // populate the cpuset with the parent's cpuset. However, it will be initialized to sched_load_balance=1 + // which will cause the kernel to move all cpusets out of their isolated sched_domain, causing unnecessary churn. + if m.cpuLoadBalanceDisable && !libcontainercgroups.IsCgroup2UnifiedMode() { + path := manager.Path("cpuset") + if path == "" { + return fmt.Errorf("Failed to find cpuset for newly created cgroup") + } + if err := os.MkdirAll(path, 0o755); err != nil { + return fmt.Errorf("failed to create cpuset for newly created cgroup: %w", err) + } + if err := cgroups.WriteFile(path, "cpuset.sched_load_balance", "0"); err != nil { + return err + } + } + // Apply(-1) is a hack to create the cgroup directories for each resource // subsystem. The function [cgroups.Manager.apply()] applies cgroup // configuration to the process with the specified pid. 
@@ -405,7 +429,6 @@ func (m *cgroupCommon) Create(cgroupConfig *CgroupConfig) error { if err := manager.Set(libcontainerCgroupConfig.Resources); err != nil { utilruntime.HandleError(fmt.Errorf("cgroup manager.Set failed: %w", err)) } - return nil } diff --git a/pkg/kubelet/cm/cgroup_manager_unsupported.go b/pkg/kubelet/cm/cgroup_manager_unsupported.go index a98ea0c9b5f52..986054121b450 100644 --- a/pkg/kubelet/cm/cgroup_manager_unsupported.go +++ b/pkg/kubelet/cm/cgroup_manager_unsupported.go @@ -93,6 +93,9 @@ func (m *unsupportedCgroupManager) SetCgroupConfig(name CgroupName, resourceConf return errNotSupported } +func (m *unsupportedCgroupManager) SetCPULoadBalanceDisable() { +} + var RootCgroupName = CgroupName([]string{}) func NewCgroupName(base CgroupName, components ...string) CgroupName { diff --git a/pkg/kubelet/cm/cgroup_v1_manager_linux.go b/pkg/kubelet/cm/cgroup_v1_manager_linux.go index afd02925833ae..9f35ebd7fd1eb 100644 --- a/pkg/kubelet/cm/cgroup_v1_manager_linux.go +++ b/pkg/kubelet/cm/cgroup_v1_manager_linux.go @@ -143,3 +143,7 @@ func (c *cgroupV1impl) getCgroupCPUConfig(cgroupPath string) (*ResourceConfig, e func (c *cgroupV1impl) getCgroupMemoryConfig(cgroupPath string) (*ResourceConfig, error) { return readCgroupMemoryConfig(cgroupPath, cgroupv1MemLimitFile) } + +func (m *cgroupV1impl) SetCPULoadBalanceDisable() { + m.cpuLoadBalanceDisable = true +} diff --git a/pkg/kubelet/cm/cgroup_v2_manager_linux.go b/pkg/kubelet/cm/cgroup_v2_manager_linux.go index ad93e971fef0a..d19667217d822 100644 --- a/pkg/kubelet/cm/cgroup_v2_manager_linux.go +++ b/pkg/kubelet/cm/cgroup_v2_manager_linux.go @@ -175,3 +175,7 @@ func cpuSharesToCPUWeight(cpuShares uint64) uint64 { func cpuWeightToCPUShares(cpuWeight uint64) uint64 { return uint64((((cpuWeight - 1) * 262142) / 9999) + 2) } + +func (m *cgroupV2impl) SetCPULoadBalanceDisable() { + m.cpuLoadBalanceDisable = true +} diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index efbd90c1d5f24..27ef6a302a168 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -251,6 +251,9 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I cgroupRoot := ParseCgroupfsToCgroupName(nodeConfig.CgroupRoot) cgroupManager := NewCgroupManager(subsystems, nodeConfig.CgroupDriver) nodeConfig.CgroupVersion = cgroupManager.Version() + if nodeConfig.CPUManagerPolicy == string(cpumanager.PolicyStatic) { + cgroupManager.SetCPULoadBalanceDisable() + } // Check if Cgroup-root actually exists on the node if nodeConfig.CgroupsPerQOS { // this does default to / when enabled, but this tests against regressions. 
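Net effect of the cgroup-manager and container-manager changes above: on a cgroup v1 node running the static CPU manager policy, every cgroup the kubelet creates starts with cpuset load balancing switched off. A standalone sketch (not part of the patch) for spot-checking a node; the path is illustrative and depends on the cgroup driver and hierarchy:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Illustrative cgroupfs-style path only; systemd-driver nodes use different slice names.
	const knob = "/sys/fs/cgroup/cpuset/kubepods.slice/cpuset.sched_load_balance"
	data, err := os.ReadFile(knob)
	if err != nil {
		fmt.Println("no cgroup v1 cpuset at this path:", err)
		return
	}
	// Expect "0" when the kubelet has disabled load balancing for its cgroups.
	fmt.Println("cpuset.sched_load_balance =", strings.TrimSpace(string(data)))
}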
diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go index 9e72a7cc97aa3..6b1a34165b56c 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/managed" "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/utils/cpuset" ) @@ -409,6 +410,7 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec failure = []reconciledContainer{} m.removeStaleState() + workloadEnabled := managed.IsEnabled() for _, pod := range m.activePods() { pstatus, ok := m.podStatusProvider.GetPodStatus(pod.UID) if !ok { @@ -416,6 +418,10 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec failure = append(failure, reconciledContainer{pod.Name, "", ""}) continue } + if enabled, _, _ := managed.IsPodManaged(pod); workloadEnabled && enabled { + klog.V(4).InfoS("[cpumanager] reconcileState: skipping pod; pod is managed", "pod", klog.KObj(pod)) + continue + } allContainers := pod.Spec.InitContainers allContainers = append(allContainers, pod.Spec.Containers...) diff --git a/pkg/kubelet/cm/cpumanager/policy_options.go b/pkg/kubelet/cm/cpumanager/policy_options.go index 5b30d04a3ce5b..d9d571bd5980a 100644 --- a/pkg/kubelet/cm/cpumanager/policy_options.go +++ b/pkg/kubelet/cm/cpumanager/policy_options.go @@ -22,9 +22,11 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog/v2" kubefeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/llcalign" ) // Names of the options, as part of the user interface. @@ -58,6 +60,14 @@ func CheckPolicyOptionAvailable(option string) error { return fmt.Errorf("unknown CPU Manager Policy option: %q", option) } + // must override the base feature gate check. Relevant only for alpha (disabled by default). + // for beta, options are enabled by default and we want to keep the possibility to + // disable them explicitly. + if alphaOptions.Has(option) && checkPolicyOptionHasEnablementFile(option) { + // note that we override the decision and shortcut exit with success + // all other cases exit early with failure.
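+ // (the enablement check is a node-local marker consulted by the llcalign package; see that package for the exact location)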
+ return nil + } if alphaOptions.Has(option) && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUManagerPolicyAlphaOptions) { return fmt.Errorf("CPU Manager Policy Alpha-level Options not enabled, but option %q provided", option) } @@ -183,3 +193,13 @@ func ValidateStaticPolicyOptions(opts StaticPolicyOptions, topology *topology.CP } return nil } + +func checkPolicyOptionHasEnablementFile(option string) bool { + switch option { + case PreferAlignByUnCoreCacheOption: + val := llcalign.IsEnabled() + klog.InfoS("policy option enablement file check", "option", option, "enablementFile", val) + return val + } + return false +} diff --git a/pkg/kubelet/cm/cpumanager/policy_options_test.go b/pkg/kubelet/cm/cpumanager/policy_options_test.go index e2c360bbfb7ad..242d33dab5818 100644 --- a/pkg/kubelet/cm/cpumanager/policy_options_test.go +++ b/pkg/kubelet/cm/cpumanager/policy_options_test.go @@ -25,6 +25,7 @@ import ( pkgfeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/llcalign" ) type optionAvailTest struct { @@ -258,3 +259,65 @@ func TestPolicyOptionsCompatibility(t *testing.T) { }) } } + +func TestPolicyOptionsAvailableWithEnablement(t *testing.T) { + + type optionAvailEnabTest struct { + name string + option string + featureGate featuregate.Feature + featureGateEnable bool + featureEnablementFlag bool + expectedAvailable bool + } + + testCases := []optionAvailEnabTest{ + { + name: "all disabled", + option: PreferAlignByUnCoreCacheOption, + featureGate: pkgfeatures.CPUManagerPolicyAlphaOptions, + featureGateEnable: false, // expected standard case + featureEnablementFlag: false, + expectedAvailable: false, + }, + { + name: "all enabled", + option: PreferAlignByUnCoreCacheOption, + featureGate: pkgfeatures.CPUManagerPolicyAlphaOptions, + featureGateEnable: true, // this should not be allowed by OCP profiles + featureEnablementFlag: true, + expectedAvailable: true, + }, + { + name: "enabled by feature gate", + option: PreferAlignByUnCoreCacheOption, + featureGate: pkgfeatures.CPUManagerPolicyAlphaOptions, + featureGateEnable: true, // this should not be allowed by OCP profiles, makes no sense either + featureEnablementFlag: false, + expectedAvailable: true, + }, + { + name: "enabled by enablement file", + option: PreferAlignByUnCoreCacheOption, + featureGate: pkgfeatures.CPUManagerPolicyAlphaOptions, + featureGateEnable: false, + featureEnablementFlag: true, + expectedAvailable: true, + }, + } + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, testCase.featureGate, testCase.featureGateEnable) + oldEnablementFlag := llcalign.TestOnlySetEnabled(testCase.featureEnablementFlag) + + err := CheckPolicyOptionAvailable(testCase.option) + + _ = llcalign.TestOnlySetEnabled(oldEnablementFlag) + + isEnabled := (err == nil) + if isEnabled != testCase.expectedAvailable { + t.Errorf("option %q available got=%v expected=%v", testCase.option, isEnabled, testCase.expectedAvailable) + } + }) + } +} diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go index 26d1fc6d91bef..eba1e454f6f6a 100644 --- a/pkg/kubelet/cm/cpumanager/policy_static.go +++ b/pkg/kubelet/cm/cpumanager/policy_static.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" 
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" + "k8s.io/kubernetes/pkg/kubelet/managed" "k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/utils/cpuset" ) @@ -214,6 +215,10 @@ func (p *staticPolicy) validateState(s state.State) error { // state is empty initialize s.SetDefaultCPUSet(allCPUs) klog.InfoS("Static policy initialized", "defaultCPUSet", allCPUs) + if managed.IsEnabled() { + defaultCpus := s.GetDefaultCPUSet().Difference(p.reservedCPUs) + s.SetDefaultCPUSet(defaultCpus) + } return nil } @@ -227,7 +232,9 @@ func (p *staticPolicy) validateState(s state.State) error { p.reservedCPUs.Intersection(tmpDefaultCPUset).String(), tmpDefaultCPUset.String()) } } else { - if !p.reservedCPUs.Intersection(tmpDefaultCPUset).Equals(p.reservedCPUs) { + // 2. This only applies when managed mode is disabled. Active workload partitioning feature + // removes the reserved cpus from the default cpu mask on purpose. + if !managed.IsEnabled() && !p.reservedCPUs.Intersection(tmpDefaultCPUset).Equals(p.reservedCPUs) { return fmt.Errorf("not all reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"", p.reservedCPUs.String(), tmpDefaultCPUset.String()) } @@ -259,10 +266,17 @@ func (p *staticPolicy) validateState(s state.State) error { } } totalKnownCPUs = totalKnownCPUs.Union(tmpCPUSets...) - if !totalKnownCPUs.Equals(allCPUs) { - return fmt.Errorf("current set of available CPUs \"%s\" doesn't match with CPUs in state \"%s\"", - allCPUs.String(), totalKnownCPUs.String()) + availableCPUs := p.topology.CPUDetails.CPUs() + // CPU (workload) partitioning removes reserved cpus + // from the default mask intentionally + if managed.IsEnabled() { + availableCPUs = availableCPUs.Difference(p.reservedCPUs) + } + + if !totalKnownCPUs.Equals(availableCPUs) { + return fmt.Errorf("current set of available CPUs \"%s\" doesn't match with CPUs in state \"%s\"", + availableCPUs.String(), totalKnownCPUs.String()) } return nil diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go index 9e9618873cd8e..87983038f91a6 100644 --- a/pkg/kubelet/cm/cpumanager/policy_static_test.go +++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go @@ -21,6 +21,8 @@ import ( "reflect" "testing" + "k8s.io/kubernetes/pkg/kubelet/managed" + v1 "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" @@ -961,18 +963,19 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) { // above test cases are without kubelet --reserved-cpus cmd option // the following tests are with --reserved-cpus configured type staticPolicyTestWithResvList struct { - description string - topo *topology.CPUTopology - numReservedCPUs int - reserved cpuset.CPUSet - cpuPolicyOptions map[string]string - stAssignments state.ContainerCPUAssignments - stDefaultCPUSet cpuset.CPUSet - pod *v1.Pod - expErr error - expNewErr error - expCPUAlloc bool - expCSet cpuset.CPUSet + description string + topo *topology.CPUTopology + numReservedCPUs int + reserved cpuset.CPUSet + cpuPolicyOptions map[string]string + stAssignments state.ContainerCPUAssignments + stDefaultCPUSet cpuset.CPUSet + pod *v1.Pod + expErr error + expNewErr error + expCPUAlloc bool + expCSet cpuset.CPUSet + managementPartition bool } func TestStaticPolicyStartWithResvList(t *testing.T) { @@ -1024,9 +1027,31 @@ func TestStaticPolicyStartWithResvList(t *testing.T) { stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), expNewErr: fmt.Errorf("[cpumanager] unable to reserve 
the required amount of CPUs (size of 0-1 did not equal 1)"), }, + { + description: "reserved cores 0 & 6 are not present in available cpuset when management partitioning is enabled", + topo: topoDualSocketHT, + numReservedCPUs: 2, + stAssignments: state.ContainerCPUAssignments{}, + managementPartition: true, + expCSet: cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11), + }, + { + description: "reserved cores 0 & 6 are not present in available cpuset when management partitioning is enabled during recovery", + topo: topoDualSocketHT, + numReservedCPUs: 2, + stAssignments: state.ContainerCPUAssignments{}, + stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11), + managementPartition: true, + expCSet: cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11), + }, } for _, testCase := range testCases { t.Run(testCase.description, func(t *testing.T) { + wasManaged := managed.IsEnabled() + managed.TestOnlySetEnabled(testCase.managementPartition) + defer func() { + managed.TestOnlySetEnabled(wasManaged) + }() featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.CPUManagerPolicyAlphaOptions, true) p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), testCase.cpuPolicyOptions) if !reflect.DeepEqual(err, testCase.expNewErr) { diff --git a/pkg/kubelet/cm/qos_container_manager_linux.go b/pkg/kubelet/cm/qos_container_manager_linux.go index 0f88e10ff69bc..499373eba3cd7 100644 --- a/pkg/kubelet/cm/qos_container_manager_linux.go +++ b/pkg/kubelet/cm/qos_container_manager_linux.go @@ -35,6 +35,7 @@ import ( "k8s.io/component-helpers/resource" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" kubefeatures "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/kubelet/managed" ) const ( @@ -174,6 +175,9 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass] reuseReqs := make(v1.ResourceList, 4) for i := range pods { pod := pods[i] + if enabled, _, _ := managed.IsPodManaged(pod); enabled { + continue + } qosClass := v1qos.GetPodQOS(pod) if qosClass != v1.PodQOSBurstable { // we only care about the burstable qos tier diff --git a/pkg/kubelet/cm/types.go b/pkg/kubelet/cm/types.go index e6338d3af8138..40195e9cd1d62 100644 --- a/pkg/kubelet/cm/types.go +++ b/pkg/kubelet/cm/types.go @@ -93,6 +93,8 @@ type CgroupManager interface { SetCgroupConfig(name CgroupName, resourceConfig *ResourceConfig) error // Version of the cgroup implementation on the host Version() int + // Toggle whether CPU load balancing should be disabled for new cgroups the kubelet creates + SetCPULoadBalanceDisable() } // QOSContainersInfo stores the names of containers per qos diff --git a/pkg/kubelet/config/file.go b/pkg/kubelet/config/file.go index 79e2af6ed6216..d2526ec989d97 100644 --- a/pkg/kubelet/config/file.go +++ b/pkg/kubelet/config/file.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/kubelet/managed" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" utilio "k8s.io/utils/io" ) @@ -230,6 +231,16 @@ func (s *sourceFile) extractFromFile(filename string) (pod *v1.Pod, err error) { if podErr != nil { return pod, podErr } + if managed.IsEnabled() { + if newPod, _, err := managed.ModifyStaticPodForPinnedManagement(pod); err != nil { + klog.V(2).Error(err, "Static Pod is managed but errored", "name", pod.ObjectMeta.Name, "namespace", pod.ObjectMeta.Namespace) + } else if newPod != nil { + klog.V(2).InfoS("Static Pod 
is managed. Using modified pod", "name", newPod.ObjectMeta.Name, "namespace", newPod.ObjectMeta.Namespace, "annotations", newPod.Annotations) + pod = newPod + } else { + klog.V(2).InfoS("Static Pod is not managed", "name", pod.ObjectMeta.Name, "namespace", pod.ObjectMeta.Namespace) + } + } return pod, nil } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index b8bbfece86024..15937cb839e96 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -95,6 +95,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/kuberuntime" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/logs" + "k8s.io/kubernetes/pkg/kubelet/managed" "k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/kubernetes/pkg/kubelet/metrics/collectors" "k8s.io/kubernetes/pkg/kubelet/network/dns" @@ -112,6 +113,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/server" servermetrics "k8s.io/kubernetes/pkg/kubelet/server/metrics" serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats" + "k8s.io/kubernetes/pkg/kubelet/sharedcpus" "k8s.io/kubernetes/pkg/kubelet/stats" "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/kubelet/sysctl" @@ -170,10 +172,6 @@ const ( // the cache. runtimeCacheRefreshPeriod = housekeepingPeriod + housekeepingWarningDuration - // Period for performing eviction monitoring. - // ensure this is kept in sync with internal cadvisor housekeeping. - evictionMonitoringPeriod = time.Second * 10 - // The path in containers' filesystems where the hosts file is mounted. linuxEtcHostsPath = "/etc/hosts" windowsEtcHostsPath = "C:\\Windows\\System32\\drivers\\etc\\hosts" @@ -250,8 +248,21 @@ var ( // This is exposed for unit tests. goos = sysruntime.GOOS + + // Period for performing eviction monitoring. + // ensure this is kept in sync with internal cadvisor housekeeping. 
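+ // In OpenShift the default below can be overridden at process start via the OPENSHIFT_EVICTION_MONITORING_PERIOD_DURATION environment variable (a Go duration string such as "30s"); see init() below.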
+ evictionMonitoringPeriod = time.Second * 10 ) +func init() { + if value := os.Getenv("OPENSHIFT_EVICTION_MONITORING_PERIOD_DURATION"); value != "" { + if duration, err := time.ParseDuration(value); err == nil { + klog.Infof("Detected OPENSHIFT_EVICTION_MONITORING_PERIOD_DURATION: %v", value) + evictionMonitoringPeriod = duration + } + } +} + func getContainerEtcHostsPath() string { if goos == "windows" { return windowsEtcHostsPath @@ -668,6 +679,13 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.runtimeService = kubeDeps.RemoteRuntimeService + if managed.IsEnabled() { + klog.InfoS("Pinned Workload Management Enabled") + } + if sharedcpus.IsEnabled() { + klog.InfoS("Mixed CPUs Workload Enabled") + } + if kubeDeps.KubeClient != nil { klet.runtimeClassManager = runtimeclass.NewManager(kubeDeps.KubeClient) } @@ -1685,16 +1703,13 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { http.Error(w, errs.ToAggregate().Error(), http.StatusBadRequest) return } else if nlq != nil { - if req.URL.Path != "/" && req.URL.Path != "" { - http.Error(w, "path not allowed in query mode", http.StatusNotAcceptable) - return - } if errs := nlq.validate(); len(errs) > 0 { http.Error(w, errs.ToAggregate().Error(), http.StatusNotAcceptable) return } // Validation ensures that the request does not query services and files at the same time - if len(nlq.Services) > 0 { + // OCP: Presence of journal in the path indicates it is a query for service(s) + if len(nlq.Services) > 0 || req.URL.Path == "journal" || req.URL.Path == "journal/" { journal.ServeHTTP(w, req) return } diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 0505821a75bbb..7b34b0a48c6ec 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -42,8 +42,11 @@ import ( kubeletapis "k8s.io/kubelet/pkg/apis" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/events" + "k8s.io/kubernetes/pkg/kubelet/managed" "k8s.io/kubernetes/pkg/kubelet/nodestatus" + "k8s.io/kubernetes/pkg/kubelet/sharedcpus" taintutil "k8s.io/kubernetes/pkg/util/taints" volutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -131,6 +134,10 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { requiresUpdate = kl.updateDefaultLabels(node, existingNode) || requiresUpdate requiresUpdate = kl.reconcileExtendedResource(node, existingNode) || requiresUpdate requiresUpdate = kl.reconcileHugePageResource(node, existingNode) || requiresUpdate + if managed.IsEnabled() { + requiresUpdate = kl.addManagementNodeCapacity(node, existingNode) || requiresUpdate + } + requiresUpdate = kl.reconcileSharedCPUsNodeCapacity(node, existingNode) || requiresUpdate if requiresUpdate { if _, _, err := nodeutil.PatchNodeStatus(kl.kubeClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, existingNode); err != nil { klog.ErrorS(err, "Unable to reconcile node with API server,error updating node", "node", klog.KObj(node)) @@ -141,6 +148,44 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { return true } +// addManagementNodeCapacity adds the managednode capacity to the node +func (kl *Kubelet) addManagementNodeCapacity(initialNode, existingNode *v1.Node) bool { + updateDefaultResources(initialNode, existingNode) + machineInfo, err := kl.cadvisor.MachineInfo() + if err != nil { + klog.Errorf("Unable to calculate managed node capacity for %q: %v", kl.nodeName, err) + 
return false + } + cpuRequest := cadvisor.CapacityFromMachineInfo(machineInfo)[v1.ResourceCPU] + cpuRequestInMilli := cpuRequest.MilliValue() + newCPURequest := resource.NewMilliQuantity(cpuRequestInMilli*1000, cpuRequest.Format) + managedResourceName := managed.GenerateResourceName("management") + if existingCapacity, ok := existingNode.Status.Capacity[managedResourceName]; ok && existingCapacity.Equal(*newCPURequest) { + return false + } + existingNode.Status.Capacity[managedResourceName] = *newCPURequest + return true +} + +func (kl *Kubelet) reconcileSharedCPUsNodeCapacity(initialNode, existingNode *v1.Node) bool { + updateDefaultResources(initialNode, existingNode) + sharedCPUsResourceName := sharedcpus.GetResourceName() + // delete resources in case they exist and feature has been disabled + if !sharedcpus.IsEnabled() { + if _, ok := existingNode.Status.Capacity[sharedCPUsResourceName]; ok { + delete(existingNode.Status.Capacity, sharedCPUsResourceName) + return true + } + return false + } + q := resource.NewQuantity(sharedcpus.GetConfig().ContainersLimit, resource.DecimalSI) + if existingCapacity, ok := existingNode.Status.Capacity[sharedCPUsResourceName]; ok && existingCapacity.Equal(*q) { + return false + } + existingNode.Status.Capacity[sharedCPUsResourceName] = *q + return true +} + // reconcileHugePageResource will update huge page capacity for each page size and remove huge page sizes no longer supported func (kl *Kubelet) reconcileHugePageResource(initialNode, existingNode *v1.Node) bool { requiresUpdate := updateDefaultResources(initialNode, existingNode) @@ -432,6 +477,10 @@ func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) { } } } + if managed.IsEnabled() { + kl.addManagementNodeCapacity(node, node) + } + kl.reconcileSharedCPUsNodeCapacity(node, node) kl.setNodeStatus(ctx, node) diff --git a/pkg/kubelet/kubelet_server_journal.go b/pkg/kubelet/kubelet_server_journal.go index 0159910803e30..4ad5dcea527f3 100644 --- a/pkg/kubelet/kubelet_server_journal.go +++ b/pkg/kubelet/kubelet_server_journal.go @@ -35,7 +35,7 @@ import ( "time" securejoin "github.com/cyphar/filepath-securejoin" - + "k8s.io/apimachinery/pkg/util/sets" utilvalidation "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -54,6 +54,7 @@ var ( // character cannot be used to create invalid sequences. This is intended as a broad defense against malformed // input that could cause an escape. reServiceNameUnsafeCharacters = regexp.MustCompile(`[^a-zA-Z\-_.:0-9@]+`) + reRelativeDate = regexp.MustCompile(`^(\+|\-)?[\d]+(s|m|h|d)$`) ) // journalServer returns text output from the OS specific service logger to view @@ -114,6 +115,19 @@ type options struct { // Pattern filters log entries by the provided regex pattern. On Linux nodes, this pattern will be read as a // PCRE2 regex, on Windows nodes it will be read as a PowerShell regex. Support for this is implementation specific. 
Pattern string + ocAdm +} + +// ocAdm encapsulates the oc adm node-logs specific options +type ocAdm struct { + // Since is an ISO timestamp or relative date from which to show logs + Since string + // Until is an ISO timestamp or relative date until which to show logs + Until string + // Format is the alternate format (short, cat, json, short-unix) to display journal logs + Format string + // CaseSensitive controls the case sensitivity of pattern searches + CaseSensitive bool } // newNodeLogQuery parses query values and converts all known options into nodeLogQuery @@ -122,7 +136,7 @@ func newNodeLogQuery(query url.Values) (*nodeLogQuery, field.ErrorList) { var nlq nodeLogQuery var err error - queries, ok := query["query"] + queries, okQuery := query["query"] if len(queries) > 0 { for _, q := range queries { // The presence of / or \ is a hint that the query is for a log file. If the query is for foo.log without a @@ -134,11 +148,20 @@ func newNodeLogQuery(query url.Values) (*nodeLogQuery, field.ErrorList) { } } } + units, okUnit := query["unit"] + if len(units) > 0 { + for _, u := range units { + // We don't check for files as the heuristics do not apply to unit + if strings.TrimSpace(u) != "" { // Prevent queries with just spaces + nlq.Services = append(nlq.Services, u) + } + } + } // Prevent specifying an empty or blank space query. // Example: kubectl get --raw /api/v1/nodes/$node/proxy/logs?query=" " - if ok && (len(nlq.Files) == 0 && len(nlq.Services) == 0) { - allErrs = append(allErrs, field.Invalid(field.NewPath("query"), queries, "query cannot be empty")) + if (okQuery || okUnit) && (len(nlq.Files) == 0 && len(nlq.Services) == 0) { + allErrs = append(allErrs, field.Invalid(field.NewPath("unit"), queries, "unit cannot be empty")) } var sinceTime time.Time @@ -176,6 +199,9 @@ func newNodeLogQuery(query url.Values) (*nodeLogQuery, field.ErrorList) { var tailLines int tailLinesValue := query.Get("tailLines") + if len(tailLinesValue) == 0 { + tailLinesValue = query.Get("tail") + } if len(tailLinesValue) > 0 { tailLines, err = strconv.Atoi(tailLinesValue) if err != nil { @@ -186,15 +212,28 @@ func newNodeLogQuery(query url.Values) (*nodeLogQuery, field.ErrorList) { } pattern := query.Get("pattern") + if len(pattern) == 0 { + pattern = query.Get("grep") + } if len(pattern) > 0 { nlq.Pattern = pattern + caseSensitiveValue := query.Get("case-sensitive") + if len(caseSensitiveValue) > 0 { + caseSensitive, err := strconv.ParseBool(query.Get("case-sensitive")) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("case-sensitive"), query.Get("case-sensitive"), + err.Error())) + } else { + nlq.CaseSensitive = caseSensitive + } + } } - if len(allErrs) > 0 { - return nil, allErrs - } + nlq.Since = query.Get("since") + nlq.Until = query.Get("until") + nlq.Format = query.Get("output") - if reflect.DeepEqual(nlq, nodeLogQuery{}) { + if len(allErrs) > 0 { return nil, allErrs } @@ -219,14 +258,13 @@ func validateServices(services []string) field.ErrorList { func (n *nodeLogQuery) validate() field.ErrorList { allErrs := validateServices(n.Services) switch { - case len(n.Files) == 0 && len(n.Services) == 0: - allErrs = append(allErrs, field.Required(field.NewPath("query"), "cannot be empty with options")) + // OCP: Allow len(n.Files) == 0 && len(n.Services) == 0 as we want to be able to return all journal / WinEvent logs case len(n.Files) > 0 && len(n.Services) > 0: allErrs = append(allErrs, field.Invalid(field.NewPath("query"), fmt.Sprintf("%v, %v", n.Files, n.Services), "cannot 
specify a file and service")) case len(n.Files) > 1: allErrs = append(allErrs, field.Invalid(field.NewPath("query"), n.Files, "cannot specify more than one file")) - case len(n.Files) == 1 && n.options != (options{}): + case len(n.Files) == 1 && !reflect.DeepEqual(n.options, options{}): allErrs = append(allErrs, field.Invalid(field.NewPath("query"), n.Files, "cannot specify file with options")) case len(n.Files) == 1: if fullLogFilename, err := securejoin.SecureJoin(nodeLogDir, n.Files[0]); err != nil { @@ -258,6 +296,35 @@ func (n *nodeLogQuery) validate() field.ErrorList { allErrs = append(allErrs, field.Invalid(field.NewPath("pattern"), n.Pattern, err.Error())) } + // "oc adm node-logs" specific validation + + if n.SinceTime != nil && (len(n.Since) > 0 || len(n.Until) > 0) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("sinceTime"), + "`since or until` and `sinceTime` cannot be specified")) + } + + if n.UntilTime != nil && (len(n.Since) > 0 || len(n.Until) > 0) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("untilTime"), + "`since or until` and `untilTime` cannot be specified")) + } + + if err := validateDate(n.Since); err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("since"), n.Since, err.Error())) + } + + if err := validateDate(n.Until); err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("until"), n.Until, err.Error())) + } + + allowedFormats := sets.New[string]("short-precise", "json", "short", "short-unix", "short-iso", + "short-iso-precise", "cat", "") + if len(n.Format) > 0 && runtime.GOOS == "windows" { + allErrs = append(allErrs, field.Invalid(field.NewPath("output"), n.Format, + "output is not supported on Windows")) + } else if !allowedFormats.Has(n.Format) { + allErrs = append(allErrs, field.NotSupported(field.NewPath("output"), n.Format, allowedFormats.UnsortedList())) + } + return allErrs } @@ -280,11 +347,8 @@ func (n *nodeLogQuery) copyForBoot(ctx context.Context, w io.Writer, previousBoo return } nativeLoggers, fileLoggers := n.splitNativeVsFileLoggers(ctx) - if len(nativeLoggers) > 0 { - n.copyServiceLogs(ctx, w, nativeLoggers, previousBoot) - } - if len(fileLoggers) > 0 && n.options != (options{}) { + if len(fileLoggers) > 0 && !reflect.DeepEqual(n.options, options{}) { fmt.Fprintf(w, "\noptions present and query resolved to log files for %v\ntry without specifying options\n", fileLoggers) return @@ -292,7 +356,11 @@ func (n *nodeLogQuery) copyForBoot(ctx context.Context, w io.Writer, previousBoo if len(fileLoggers) > 0 { copyFileLogs(ctx, w, fileLoggers) + return } + // OCP: Return all logs in the case where nativeLoggers == "" + n.copyServiceLogs(ctx, w, nativeLoggers, previousBoot) + } // splitNativeVsFileLoggers checks if each service logs to native OS logs or to a file and returns a list of services @@ -413,3 +481,16 @@ func safeServiceName(s string) error { } return nil } + +func validateDate(date string) error { + if len(date) == 0 { + return nil + } + if reRelativeDate.MatchString(date) { + return nil + } + if _, err := time.Parse(dateLayout, date); err == nil { + return nil + } + return fmt.Errorf("date must be a relative time of the form '(+|-)[0-9]+(s|m|h|d)' or a date in 'YYYY-MM-DD HH:MM:SS' form") +} diff --git a/pkg/kubelet/kubelet_server_journal_linux.go b/pkg/kubelet/kubelet_server_journal_linux.go index 0265d08fccb16..6bd22c8e749ef 100644 --- a/pkg/kubelet/kubelet_server_journal_linux.go +++ b/pkg/kubelet/kubelet_server_journal_linux.go @@ -36,14 +36,20 @@ func getLoggingCmd(n 
*nodeLogQuery, services []string) (string, []string, error) args := []string{ "--utc", "--no-pager", - "--output=short-precise", } - if n.SinceTime != nil { + + if len(n.Since) > 0 { + args = append(args, fmt.Sprintf("--since=%s", n.Since)) + } else if n.SinceTime != nil { args = append(args, fmt.Sprintf("--since=%s", n.SinceTime.Format(dateLayout))) } - if n.UntilTime != nil { - args = append(args, fmt.Sprintf("--until=%s", n.UntilTime.Format(dateLayout))) + + if len(n.Until) > 0 { + args = append(args, fmt.Sprintf("--until=%s", n.Until)) + } else if n.UntilTime != nil { + args = append(args, fmt.Sprintf("--until=%s", n.UntilTime.Format(dateLayout))) } + if n.TailLines != nil { args = append(args, "--pager-end", fmt.Sprintf("--lines=%d", *n.TailLines)) } @@ -54,12 +60,21 @@ func getLoggingCmd(n *nodeLogQuery, services []string) (string, []string, error) } if len(n.Pattern) > 0 { args = append(args, "--grep="+n.Pattern) + args = append(args, fmt.Sprintf("--case-sensitive=%t", n.CaseSensitive)) } if n.Boot != nil { args = append(args, "--boot", fmt.Sprintf("%d", *n.Boot)) } + var output string + if len(n.Format) > 0 { + output = n.Format + } else { + output = "short-precise" + } + args = append(args, fmt.Sprintf("--output=%s", output)) + return "journalctl", args, nil } diff --git a/pkg/kubelet/kubelet_server_journal_test.go b/pkg/kubelet/kubelet_server_journal_test.go index 7e8c13c1f8472..30d3ef8103234 100644 --- a/pkg/kubelet/kubelet_server_journal_test.go +++ b/pkg/kubelet/kubelet_server_journal_test.go @@ -78,10 +78,10 @@ func Test_newNodeLogQuery(t *testing.T) { want *nodeLogQuery wantErr bool }{ - {name: "empty", query: url.Values{}, want: nil}, - {query: url.Values{"unknown": []string{"true"}}, want: nil}, + {name: "empty", query: url.Values{}, want: &nodeLogQuery{}}, + {query: url.Values{"unknown": []string{"true"}}, want: &nodeLogQuery{}}, - {query: url.Values{"sinceTime": []string{""}}, want: nil}, + {query: url.Values{"sinceTime": []string{""}}, want: &nodeLogQuery{}}, {query: url.Values{"sinceTime": []string{"2019-12-04 02:00:00"}}, wantErr: true}, {query: url.Values{"sinceTime": []string{"2019-12-04 02:00:00.000"}}, wantErr: true}, {query: url.Values{"sinceTime": []string{"2019-12-04 02"}}, wantErr: true}, @@ -89,7 +89,7 @@ func Test_newNodeLogQuery(t *testing.T) { {query: url.Values{"sinceTime": []string{validTimeValue}}, want: &nodeLogQuery{options: options{SinceTime: &validT}}}, - {query: url.Values{"untilTime": []string{""}}, want: nil}, + {query: url.Values{"untilTime": []string{""}}, want: &nodeLogQuery{}}, {query: url.Values{"untilTime": []string{"2019-12-04 02:00:00"}}, wantErr: true}, {query: url.Values{"untilTime": []string{"2019-12-04 02:00:00.000"}}, wantErr: true}, {query: url.Values{"untilTime": []string{"2019-12-04 02"}}, wantErr: true}, @@ -103,7 +103,6 @@ func Test_newNodeLogQuery(t *testing.T) { {query: url.Values{"pattern": []string{"foo"}}, want: &nodeLogQuery{options: options{Pattern: "foo"}}}, - {query: url.Values{"boot": []string{""}}, want: nil}, {query: url.Values{"boot": []string{"0"}}, want: &nodeLogQuery{options: options{Boot: ptr.To(0)}}}, {query: url.Values{"boot": []string{"-23"}}, want: &nodeLogQuery{options: options{Boot: ptr.To(-23)}}}, {query: url.Values{"boot": []string{"foo"}}, wantErr: true}, @@ -116,6 +115,11 @@ func Test_newNodeLogQuery(t *testing.T) { {query: url.Values{"query": []string{"foo", "/bar"}}, want: &nodeLogQuery{Services: []string{"foo"}, Files: []string{"/bar"}}}, {query: url.Values{"query": []string{"/foo", `\bar`}}, want:
&nodeLogQuery{Files: []string{"/foo", `\bar`}}}, + {query: url.Values{"unit": []string{""}}, wantErr: true}, + {query: url.Values{"unit": []string{" ", " "}}, wantErr: true}, + {query: url.Values{"unit": []string{"foo"}}, want: &nodeLogQuery{Services: []string{"foo"}}}, + {query: url.Values{"unit": []string{"foo", "bar"}}, want: &nodeLogQuery{Services: []string{"foo", "bar"}}}, + {query: url.Values{"unit": []string{"foo", "/bar"}}, want: &nodeLogQuery{Services: []string{"foo", "/bar"}}}, } for _, tt := range tests { t.Run(tt.query.Encode(), func(t *testing.T) { @@ -180,10 +184,12 @@ func Test_nodeLogQuery_validate(t *testing.T) { pattern = "foo" invalid = "foo\\" ) - since, err := time.Parse(time.RFC3339, "2023-01-04T02:00:00Z") + sinceTime, err := time.Parse(time.RFC3339, "2023-01-04T02:00:00Z") assert.NoError(t, err) - until, err := time.Parse(time.RFC3339, "2023-02-04T02:00:00Z") + untilTime, err := time.Parse(time.RFC3339, "2023-02-04T02:00:00Z") assert.NoError(t, err) + since := "2019-12-04 02:00:00" + until := "2019-12-04 03:00:00" tests := []struct { name string @@ -192,23 +198,37 @@ func Test_nodeLogQuery_validate(t *testing.T) { options options wantErr bool }{ - {name: "empty", wantErr: true}, - {name: "empty with options", options: options{SinceTime: &since}, wantErr: true}, + {name: "empty"}, + {name: "empty with options", options: options{SinceTime: &sinceTime}}, {name: "one service", Services: []string{service1}}, {name: "two services", Services: []string{service1, service2}}, {name: "one service one file", Services: []string{service1}, Files: []string{file1}, wantErr: true}, {name: "two files", Files: []string{file1, file2}, wantErr: true}, {name: "one file options", Files: []string{file1}, options: options{Pattern: pattern}, wantErr: true}, {name: "invalid pattern", Services: []string{service1}, options: options{Pattern: invalid}, wantErr: true}, - {name: "since", Services: []string{service1}, options: options{SinceTime: &since}}, - {name: "until", Services: []string{service1}, options: options{UntilTime: &until}}, - {name: "since until", Services: []string{service1}, options: options{SinceTime: &until, UntilTime: &since}, - wantErr: true}, - // boot is not supported on Windows. 
- {name: "boot", Services: []string{service1}, options: options{Boot: ptr.To(-1)}, wantErr: runtime.GOOS == "windows"}, + {name: "sinceTime", Services: []string{service1}, options: options{SinceTime: &sinceTime}}, + {name: "untilTime", Services: []string{service1}, options: options{UntilTime: &untilTime}}, + {name: "sinceTime untilTime", Services: []string{service1}, options: options{SinceTime: &untilTime, + UntilTime: &sinceTime}, wantErr: true}, + {name: "boot", Services: []string{service1}, options: options{Boot: ptr.To(-1)}}, {name: "boot out of range", Services: []string{service1}, options: options{Boot: ptr.To(1)}, wantErr: true}, {name: "tailLines", Services: []string{service1}, options: options{TailLines: ptr.To(100)}}, {name: "tailLines out of range", Services: []string{service1}, options: options{TailLines: ptr.To(100000)}}, + {name: "since", Services: []string{service1}, options: options{ocAdm: ocAdm{Since: since}}}, + {name: "since RFC3339", Services: []string{service1}, options: options{ocAdm: ocAdm{Since: sinceTime.String()}}, wantErr: true}, + {name: "until", Services: []string{service1}, options: options{ocAdm: ocAdm{Until: until}}}, + {name: "until RFC3339", Services: []string{service1}, options: options{ocAdm: ocAdm{Until: untilTime.String()}}, wantErr: true}, + {name: "since sinceTime", Services: []string{service1}, options: options{SinceTime: &sinceTime, + ocAdm: ocAdm{Since: since}}, wantErr: true}, + {name: "until sinceTime", Services: []string{service1}, options: options{SinceTime: &sinceTime, + ocAdm: ocAdm{Until: until}}, wantErr: true}, + {name: "since untilTime", Services: []string{service1}, options: options{UntilTime: &untilTime, + ocAdm: ocAdm{Since: since}}, wantErr: true}, + {name: "until untilTime", Services: []string{service1}, options: options{UntilTime: &untilTime, + ocAdm: ocAdm{Until: until}}, wantErr: true}, + {name: "format", Services: []string{service1}, options: options{ocAdm: ocAdm{Format: "cat"}}}, + {name: "format invalid", Services: []string{service1}, options: options{ocAdm: ocAdm{Format: "foo"}}, + wantErr: true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/kubelet/kubelet_server_journal_windows.go b/pkg/kubelet/kubelet_server_journal_windows.go index a805cfc5453d8..5134d65e8d2d5 100644 --- a/pkg/kubelet/kubelet_server_journal_windows.go +++ b/pkg/kubelet/kubelet_server_journal_windows.go @@ -36,10 +36,15 @@ func getLoggingCmd(n *nodeLogQuery, services []string) (string, []string, error) } psCmd := "Get-WinEvent -FilterHashtable @{LogName='Application'" - if n.SinceTime != nil { + if len(n.Since) > 0 { + psCmd += fmt.Sprintf("; StartTime='%s'", n.Since) + } else if n.SinceTime != nil { psCmd += fmt.Sprintf("; StartTime='%s'", n.SinceTime.Format(dateLayout)) } - if n.UntilTime != nil { + + if len(n.Until) > 0 { + psCmd += fmt.Sprintf("; EndTime='%s'", n.Until) + } else if n.UntilTime != nil { psCmd += fmt.Sprintf("; EndTime='%s'", n.UntilTime.Format(dateLayout)) } var providers []string diff --git a/pkg/kubelet/llcalign/llcalign.go b/pkg/kubelet/llcalign/llcalign.go new file mode 100644 index 0000000000000..77293dbe52874 --- /dev/null +++ b/pkg/kubelet/llcalign/llcalign.go @@ -0,0 +1,46 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package llcalign + +import ( + "os" +) + +var ( + llcAlignmentEnabled bool + llcAlignmentFilename = "/etc/kubernetes/openshift-llc-alignment" +) + +func init() { + readEnablementFile() +} + +func readEnablementFile() { + if _, err := os.Stat(llcAlignmentFilename); err == nil { + llcAlignmentEnabled = true + } +} + +func IsEnabled() bool { + return llcAlignmentEnabled +} + +func TestOnlySetEnabled(enabled bool) bool { + oldEnabled := llcAlignmentEnabled + llcAlignmentEnabled = enabled + return oldEnabled +} diff --git a/pkg/kubelet/managed/cpu_shares.go b/pkg/kubelet/managed/cpu_shares.go new file mode 100644 index 0000000000000..de60b6d6e755e --- /dev/null +++ b/pkg/kubelet/managed/cpu_shares.go @@ -0,0 +1,30 @@ +package managed + +const ( + // These limits are defined in the kernel: + // https://github.com/torvalds/linux/blob/0bddd227f3dc55975e2b8dfa7fc6f959b062a2c7/kernel/sched/sched.h#L427-L428 + MinShares = 2 + MaxShares = 262144 + + SharesPerCPU = 1024 + MilliCPUToCPU = 1000 +) + +// MilliCPUToShares converts the milliCPU to CFS shares. +func MilliCPUToShares(milliCPU int64) uint64 { + if milliCPU == 0 { + // Docker converts zero milliCPU to unset, which maps to kernel default + // for unset: 1024. Return 2 here to really match kernel default for + // zero milliCPU. + return MinShares + } + // Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding. + shares := (milliCPU * SharesPerCPU) / MilliCPUToCPU + if shares < MinShares { + return MinShares + } + if shares > MaxShares { + return MaxShares + } + return uint64(shares) +} diff --git a/pkg/kubelet/managed/managed.go b/pkg/kubelet/managed/managed.go new file mode 100644 index 0000000000000..4063d5381d6e2 --- /dev/null +++ b/pkg/kubelet/managed/managed.go @@ -0,0 +1,210 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package managed + +import ( + "encoding/json" + "fmt" + "os" + "strings" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +var ( + pinnedManagementEnabled bool + pinnedManagementFilename = "/etc/kubernetes/openshift-workload-pinning" +) + +const ( + qosWarning = "skipping pod CPUs requests modifications because it has guaranteed QoS class" + WorkloadsAnnotationPrefix = "target.workload.openshift.io/" + WorkloadsCapacitySuffix = "workload.openshift.io/cores" + ContainerAnnotationPrefix = "resources.workload.openshift.io/" + WorkloadAnnotationWarning = "workload.openshift.io/warning" +) + +type WorkloadContainerAnnotation struct { + CpuShares uint64 `json:"cpushares"` + CpuLimit int64 `json:"cpulimit,omitempty"` +} + +func NewWorkloadContainerAnnotation(cpushares uint64) WorkloadContainerAnnotation { + return WorkloadContainerAnnotation{ + CpuShares: cpushares, + } +} + +func (w WorkloadContainerAnnotation) Serialize() ([]byte, error) { + return json.Marshal(w) +} + +func init() { + readEnablementFile() +} + +func readEnablementFile() { + if _, err := os.Stat(pinnedManagementFilename); err == nil { + pinnedManagementEnabled = true + } +} + +// TestOnlySetEnabled allows changing the state of management partition enablement +// This method MUST NOT be used outside of test code +func TestOnlySetEnabled(enabled bool) { + pinnedManagementEnabled = enabled +} + +func IsEnabled() bool { + return pinnedManagementEnabled +} + +// IsPodManaged reports whether the pod carries a workload annotation and, if so, +// also returns the workload name and the annotation payload. +func IsPodManaged(pod *v1.Pod) (bool, string, string) { + if pod.ObjectMeta.Annotations == nil { + return false, "", "" + } + for annotation, value := range pod.ObjectMeta.Annotations { + if strings.HasPrefix(annotation, WorkloadsAnnotationPrefix) { + return true, strings.TrimPrefix(annotation, WorkloadsAnnotationPrefix), value + } + } + return false, "", "" +} + +// ModifyStaticPodForPinnedManagement returns a copy of the pod modified for pinned workload management +func ModifyStaticPodForPinnedManagement(pod *v1.Pod) (*v1.Pod, string, error) { + pod = pod.DeepCopy() + enabled, workloadName, value := IsPodManaged(pod) + if !enabled { + return nil, "", nil + } + if pod.Annotations == nil { + pod.Annotations = make(map[string]string) + } + if isPodGuaranteed(pod) { + stripWorkloadAnnotations(pod.Annotations) + pod.Annotations[WorkloadAnnotationWarning] = qosWarning + return pod, "", nil + } + pod.Annotations[fmt.Sprintf("%v%v", WorkloadsAnnotationPrefix, workloadName)] = value + if err := updateContainers(workloadName, pod); err != nil { + return nil, "", err + } + return pod, workloadName, nil +} + +func GenerateResourceName(workloadName string) v1.ResourceName { + return v1.ResourceName(fmt.Sprintf("%v.%v", workloadName, WorkloadsCapacitySuffix)) +} + +func updateContainers(workloadName string, pod *v1.Pod) error { + updateContainer := func(container *v1.Container) error { + if container.Resources.Requests == nil { + return fmt.Errorf("managed container %v does not have Resource.Requests", container.Name) + } + if _, ok := container.Resources.Requests[v1.ResourceCPU]; !ok { + return fmt.Errorf("managed container %v does not have cpu requests", container.Name) + } + if _, ok := container.Resources.Requests[v1.ResourceMemory]; !ok { + return fmt.Errorf("managed container %v does not have memory requests", container.Name) + } + if container.Resources.Limits == nil { + container.Resources.Limits = v1.ResourceList{} + } + cpuRequest :=
container.Resources.Requests[v1.ResourceCPU] + cpuRequestInMilli := cpuRequest.MilliValue() + + containerAnnotation := NewWorkloadContainerAnnotation(MilliCPUToShares(cpuRequestInMilli)) + if value, ok := container.Resources.Limits[v1.ResourceCPU]; ok { + containerAnnotation.CpuLimit = value.MilliValue() + } + + jsonAnnotation, _ := containerAnnotation.Serialize() + containerNameKey := fmt.Sprintf("%v%v", ContainerAnnotationPrefix, container.Name) + + newCPURequest := resource.NewMilliQuantity(cpuRequestInMilli*1000, cpuRequest.Format) + + pod.Annotations[containerNameKey] = string(jsonAnnotation) + container.Resources.Requests[GenerateResourceName(workloadName)] = *newCPURequest + container.Resources.Limits[GenerateResourceName(workloadName)] = *newCPURequest + + delete(container.Resources.Requests, v1.ResourceCPU) + delete(container.Resources.Limits, v1.ResourceCPU) + return nil + } + for idx := range pod.Spec.Containers { + if err := updateContainer(&pod.Spec.Containers[idx]); err != nil { + return err + } + } + for idx := range pod.Spec.InitContainers { + if err := updateContainer(&pod.Spec.InitContainers[idx]); err != nil { + return err + } + } + return nil +} + +// isPodGuaranteed checks if the pod has a guaranteed QoS. +// This QoS check is different from the library versions that check QoS, +// this is because of the order in which changes get observed. +// (i.e. the library assumes the defaulter has run on the pod resource before calculating QoS). +// +// The files will get interpreted before they reach the API server and before the defaulter applies changes, +// this function takes into account the case where only `limits.cpu` are provided but no `requests.cpu` are since that +// counts as a Guaranteed QoS after the defaulter runs. +func isPodGuaranteed(pod *v1.Pod) bool { + isGuaranteed := func(containers []v1.Container) bool { + for _, c := range containers { + // only memory and CPU resources are relevant to decide pod QoS class + for _, r := range []v1.ResourceName{v1.ResourceMemory, v1.ResourceCPU} { + limit := c.Resources.Limits[r] + request, requestExist := c.Resources.Requests[r] + if limit.IsZero() { + return false + } + if !requestExist { + continue + } + // In some corner cases, when you set CPU requests to 0 the k8s defaulter will change it to the value + // specified under the limit.
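+ // Illustrative example (not from the original patch): requests.cpu="0" with limits.cpu="100m" is still treated as Guaranteed here, because the defaulter would later copy the limit into the request.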
+ if r == v1.ResourceCPU && request.IsZero() { + continue + } + if !limit.Equal(request) { + return false + } + } + } + return true + } + return isGuaranteed(pod.Spec.InitContainers) && isGuaranteed(pod.Spec.Containers) +} + +func stripWorkloadAnnotations(annotations map[string]string) { + for k := range annotations { + if strings.HasPrefix(k, WorkloadsAnnotationPrefix) { + delete(annotations, k) + } + if strings.HasPrefix(k, ContainerAnnotationPrefix) { + delete(annotations, k) + } + } +} diff --git a/pkg/kubelet/managed/managed_test.go b/pkg/kubelet/managed/managed_test.go new file mode 100644 index 0000000000000..e4973f8a01db8 --- /dev/null +++ b/pkg/kubelet/managed/managed_test.go @@ -0,0 +1,811 @@ +package managed + +import ( + "fmt" + "testing" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestModifyStaticPodForPinnedManagementErrorStates(t *testing.T) { + + workloadAnnotations := map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + } + + testCases := []struct { + pod *v1.Pod + expectedError error + }{ + { + pod: createPod(workloadAnnotations, nil, + &v1.Container{ + Name: "nginx", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: nil, + }, + }), + expectedError: fmt.Errorf("managed container nginx does not have Resource.Requests"), + }, + { + pod: createPod(workloadAnnotations, nil, + &v1.Container{ + Name: "nginx", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }), + expectedError: fmt.Errorf("managed container nginx does not have cpu requests"), + }, + { + pod: createPod(workloadAnnotations, nil, + &v1.Container{ + Name: "nginx", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + }, + }, + }), + expectedError: fmt.Errorf("managed container nginx does not have memory requests"), + }, + { + pod: createPod(workloadAnnotations, + &v1.Container{ + Name: "nginx", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, nil), + expectedError: fmt.Errorf("managed container nginx does not have cpu requests"), + }, + { + pod: createPod(workloadAnnotations, + &v1.Container{ + Name: "nginx", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + }, + }, + }, nil), + expectedError: fmt.Errorf("managed container nginx does not have memory requests"), + }, + { + pod: createPod(nil, nil, + &v1.Container{ + Name: "nginx", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }), + expectedError: fmt.Errorf("managed container nginx does not have cpu requests"), + }, + { + pod: createPod(map[string]string{"something": "else"}, nil, + &v1.Container{ + Name: "nginx", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + }, + }, + }), + expectedError: fmt.Errorf("managed container nginx does not have memory requests"), + }, + } + + for _, tc := range testCases { + pod, workloadName, err := 
ModifyStaticPodForPinnedManagement(tc.pod) + if err != nil && err.Error() != tc.expectedError.Error() { + t.Errorf("ModifyStaticPodForPinned got error of (%v) but expected (%v)", err, tc.expectedError) + } + if pod != nil { + t.Errorf("ModifyStaticPodForPinned should return pod with nil value") + } + if workloadName != "" { + t.Errorf("ModifyStaticPodForPinned should return empty workloadName but got %v", workloadName) + } + } +} + +func TestStaticPodManaged(t *testing.T) { + testCases := []struct { + pod *v1.Pod + expectedAnnotations map[string]string + isGuaranteed bool + }{ + { + pod: &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + Annotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "nginx", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expectedAnnotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + "resources.workload.openshift.io/nginx": `{"cpushares":102}`, + }, + }, + { + pod: &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + Annotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Image: "test/nginx", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + { + Name: "c2", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + { + Name: "c_3", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expectedAnnotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + "resources.workload.openshift.io/c1": `{"cpushares":102}`, + "resources.workload.openshift.io/c2": `{"cpushares":1024}`, + "resources.workload.openshift.io/c_3": `{"cpushares":1024}`, + }, + }, + { + pod: &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + Annotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Image: "test/nginx", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + 
v1.ResourceName(v1.ResourceCPU): resource.MustParse("20m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expectedAnnotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + "resources.workload.openshift.io/c1": `{"cpushares":20,"cpulimit":100}`, + }, + }, + { + pod: &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + Annotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + "resources.workload.openshift.io/c1": `{"cpushares":20}`, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Image: "test/nginx", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expectedAnnotations: map[string]string{ + WorkloadAnnotationWarning: qosWarning, + }, + isGuaranteed: true, + }, + { + pod: &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + Annotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Image: "test/nginx", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expectedAnnotations: map[string]string{ + WorkloadAnnotationWarning: qosWarning, + }, + isGuaranteed: true, + }, + { + pod: &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + Annotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Image: "test/nginx", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("0m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expectedAnnotations: map[string]string{ + WorkloadAnnotationWarning: qosWarning, + }, + isGuaranteed: true, + }, + { + pod: &v1.Pod{ 
+ TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + Annotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Image: "test/nginx", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("200m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + { + Name: "c2", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + { + Name: "c_3", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expectedAnnotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + "resources.workload.openshift.io/c1": `{"cpushares":102,"cpulimit":200}`, + "resources.workload.openshift.io/c2": `{"cpushares":1024}`, + "resources.workload.openshift.io/c_3": `{"cpushares":1024,"cpulimit":1000}`, + }, + }, + } + + for _, tc := range testCases { + pod, workloadName, err := ModifyStaticPodForPinnedManagement(tc.pod) + if err != nil { + t.Errorf("ModifyStaticPodForPinned should not error") + } + for expectedKey, expectedValue := range tc.expectedAnnotations { + value, exists := pod.Annotations[expectedKey] + if !exists { + t.Errorf("%v key not found", expectedKey) + } + if expectedValue != value { + t.Errorf("'%v' key's value does not equal '%v' and got '%v'", expectedKey, expectedValue, value) + } + } + for _, container := range pod.Spec.Containers { + if container.Resources.Requests.Cpu().String() != "0" && !tc.isGuaranteed { + t.Errorf("cpu requests should be 0, got %v", container.Resources.Requests.Cpu().String()) + } + if container.Resources.Requests.Memory().String() == "0" && !tc.isGuaranteed { + t.Errorf("memory requests should not be 0, got %v", container.Resources.Requests.Memory().String()) + } + if _, exists := container.Resources.Requests[GenerateResourceName(workloadName)]; !exists && !tc.isGuaranteed { + t.Errorf("managed capacity label missing from pod %v and container %v", tc.pod.Name, container.Name) + } + if _, exists := container.Resources.Limits[GenerateResourceName(workloadName)]; !exists && !tc.isGuaranteed { + t.Errorf("managed capacity label missing from pod %v and container %v limits", tc.pod.Name, container.Name) + } + } + } +} + +func TestStaticPodThrottle(t *testing.T) { + testCases := []struct { + pod *v1.Pod + expectedAnnotations map[string]string + isGuaranteed bool + }{ + { + pod: &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta:
metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + Annotations: map[string]string{ + "target.workload.openshift.io/throttle": `{"effect": "PreferredDuringScheduling"}`, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "nginx", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expectedAnnotations: map[string]string{ + "target.workload.openshift.io/throttle": `{"effect": "PreferredDuringScheduling"}`, + "resources.workload.openshift.io/nginx": `{"cpushares":102}`, + }, + }, + { + pod: &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + Annotations: map[string]string{ + "target.workload.openshift.io/throttle": `{"effect": "PreferredDuringScheduling"}`, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + { + Name: "c2", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + { + Name: "c_3", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expectedAnnotations: map[string]string{ + "target.workload.openshift.io/throttle": `{"effect": "PreferredDuringScheduling"}`, + "resources.workload.openshift.io/c1": `{"cpushares":102}`, + "resources.workload.openshift.io/c2": `{"cpushares":1024}`, + "resources.workload.openshift.io/c_3": `{"cpushares":1024}`, + }, + }, + { + pod: &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + Annotations: map[string]string{ + "target.workload.openshift.io/throttle": `{"effect": "PreferredDuringScheduling"}`, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Image: "test/nginx", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expectedAnnotations: map[string]string{ + WorkloadAnnotationWarning: qosWarning, + }, + isGuaranteed: true, + }, + { + pod: &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: 
"mynamespace", + Annotations: map[string]string{ + "target.workload.openshift.io/throttle": `{"effect": "PreferredDuringScheduling"}`, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Image: "test/nginx", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expectedAnnotations: map[string]string{ + WorkloadAnnotationWarning: qosWarning, + }, + isGuaranteed: true, + }, + { + pod: &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + Annotations: map[string]string{ + "target.workload.openshift.io/throttle": `{"effect": "PreferredDuringScheduling"}`, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("200m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("200m"), + }, + }, + }, + { + Name: "c2", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("2"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("200m"), + }, + }, + }, + { + Name: "c_3", + Image: "test/image", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("100m"), + }, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expectedAnnotations: map[string]string{ + "target.workload.openshift.io/throttle": `{"effect": "PreferredDuringScheduling"}`, + "resources.workload.openshift.io/c1": `{"cpushares":102,"cpulimit":200}`, + "resources.workload.openshift.io/c2": `{"cpushares":1024,"cpulimit":2000}`, + "resources.workload.openshift.io/c_3": `{"cpushares":1024}`, + }, + }, + } + + for _, tc := range testCases { + pod, workloadName, err := ModifyStaticPodForPinnedManagement(tc.pod) + if err != nil { + t.Errorf("ModifyStaticPodForPinned should not error") + } + for expectedKey, expectedValue := range tc.expectedAnnotations { + value, exists := pod.Annotations[expectedKey] + if !exists { + t.Errorf("%v key not found", expectedKey) + } + if expectedValue != value { + t.Errorf("'%v' key's value does not equal '%v' and got '%v'", expectedKey, expectedValue, value) + } + } + for _, container := range pod.Spec.Containers { + if container.Resources.Requests.Cpu().String() != "0" && !tc.isGuaranteed { + t.Errorf("cpu requests should be 0 got %v", container.Resources.Requests.Cpu().String()) + } + if container.Resources.Requests.Memory().String() == "0" && !tc.isGuaranteed { + t.Errorf("memory requests were %v but should be %v", container.Resources.Requests.Memory().String(), container.Resources.Requests.Memory().String()) + } + if _, exists := 
container.Resources.Requests[GenerateResourceName(workloadName)]; !exists && !tc.isGuaranteed { + t.Errorf("managed capacity label missing from pod %v and container %v", tc.pod.Name, container.Name) + } + if _, exists := container.Resources.Limits[GenerateResourceName(workloadName)]; !exists && !tc.isGuaranteed { + t.Errorf("managed limits capacity label missing from pod %v and container %v", tc.pod.Name, container.Name) + } + } + } +} + +func createPod(annotations map[string]string, initContainer, container *v1.Container) *v1.Pod { + pod := &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + Annotations: annotations, + }, + Spec: v1.PodSpec{ + SecurityContext: &v1.PodSecurityContext{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + } + + if initContainer != nil { + pod.Spec.InitContainers = append(pod.Spec.InitContainers, *initContainer) + } + + if container != nil { + pod.Spec.Containers = append(pod.Spec.Containers, *container) + } + + return pod +} diff --git a/pkg/kubelet/nodestatus/setters.go b/pkg/kubelet/nodestatus/setters.go index dc55f4346eee4..5db76d38e6eec 100644 --- a/pkg/kubelet/nodestatus/setters.go +++ b/pkg/kubelet/nodestatus/setters.go @@ -23,6 +23,7 @@ import ( "net" goruntime "runtime" "strings" + "sync" "time" cadvisorapiv1 "github.com/google/cadvisor/info/v1" @@ -57,6 +58,9 @@ const ( // Setters may partially mutate the node before returning an error. type Setter func(ctx context.Context, node *v1.Node) error +// Only emit one reboot event +var rebootEvent sync.Once + // NodeAddress returns a Setter that updates address-related information on the node. func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs validateNodeIPFunc func(net.IP) error, // typically Kubelet.nodeIPValidator @@ -250,6 +254,7 @@ func hasAddressType(addresses []v1.NodeAddress, addressType v1.NodeAddressType) } return false } + func hasAddressValue(addresses []v1.NodeAddress, addressValue string) bool { for _, address := range addresses { if address.Address == addressValue { @@ -311,8 +316,12 @@ func MachineInfo(nodeName string, node.Status.NodeInfo.BootID != info.BootID { // TODO: This requires a transaction, either both node status is updated // and event is recorded or neither should happen, see issue #6055. - recordEventFunc(v1.EventTypeWarning, events.NodeRebooted, - fmt.Sprintf("Node %s has been rebooted, boot id: %s", nodeName, info.BootID)) + // + // Only emit one reboot event. 
recordEventFunc queues events and can emit many superfluous reboot events + rebootEvent.Do(func() { + recordEventFunc(v1.EventTypeWarning, events.NodeRebooted, + fmt.Sprintf("Node %s has been rebooted, boot id: %s", nodeName, info.BootID)) + }) } node.Status.NodeInfo.BootID = info.BootID diff --git a/pkg/kubelet/prober/patch_prober.go b/pkg/kubelet/prober/patch_prober.go new file mode 100644 index 0000000000000..02add8ae82512 --- /dev/null +++ b/pkg/kubelet/prober/patch_prober.go @@ -0,0 +1,56 @@ +package prober + +import ( + "net/http" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/probe" + httpprobe "k8s.io/kubernetes/pkg/probe/http" +) + +func (pb *prober) maybeProbeForBody(prober httpprobe.Prober, req *http.Request, timeout time.Duration, pod *v1.Pod, container v1.Container, probeType probeType) (probe.Result, string, error) { + if !isInterestingPod(pod) { + return prober.Probe(req, timeout) + } + bodyProber, ok := prober.(httpprobe.DetailedProber) + if !ok { + return prober.Probe(req, timeout) + } + result, output, body, probeError := bodyProber.ProbeForBody(req, timeout) + switch result { + case probe.Success: + return result, output, probeError + case probe.Warning, probe.Failure, probe.Unknown: + // these pods are interesting enough to show the body content + klog.Infof("interesting pod/%s container/%s namespace/%s: %s probe status=%v output=%q start-of-body=%s", + pod.Name, container.Name, pod.Namespace, probeType, result, output, body) + + reason := "ProbeError" // this is the normal value + if pod.DeletionTimestamp != nil { + // If the container was sent a sig-term, we want to have a different reason so we can distinguish this in our + // monitoring and watching code. + // Pod delete does this, but there are other possible reasons as well. We'll start with pod delete to improve the state of the world. + reason = "TerminatingPodProbeError" + } + + // in fact, they are so interesting we'll try to send events for them + pb.recordContainerEvent(pod, &container, v1.EventTypeWarning, reason, "%s probe error: %s\nbody: %s\n", probeType, output, body) + return result, output, probeError + default: + return result, output, probeError + } +} + +func isInterestingPod(pod *v1.Pod) bool { + if pod == nil { + return false + } + if strings.HasPrefix(pod.Namespace, "openshift-") { + return true + } + + return false +} diff --git a/pkg/kubelet/prober/prober.go b/pkg/kubelet/prober/prober.go index c1936db2efa7a..96b3913bd7664 100644 --- a/pkg/kubelet/prober/prober.go +++ b/pkg/kubelet/prober/prober.go @@ -154,7 +154,7 @@ func (pb *prober) runProbe(ctx context.Context, probeType probeType, p *v1.Probe headers := p.HTTPGet.HTTPHeaders klogV4.InfoS("HTTP-Probe", "scheme", scheme, "host", host, "port", port, "path", path, "timeout", timeout, "headers", headers) } - return pb.http.Probe(req, timeout) + return pb.maybeProbeForBody(pb.http, req, timeout, pod, container, probeType) case p.TCPSocket != nil: port, err := probe.ResolveContainerPort(p.TCPSocket.Port, &container) diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index 46ac2403d1d60..b30c6fc7d0de6 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -1118,7 +1118,7 @@ var statusesNoTracePred = httplog.StatusIsNot( // ServeHTTP responds to HTTP requests on the Kubelet. 
func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { - handler := httplog.WithLogging(s.restfulCont, statusesNoTracePred) + handler := httplog.WithLogging(s.restfulCont, statusesNoTracePred, nil) // monitor http requests var serverType string diff --git a/pkg/kubelet/sharedcpus/sharedcpus.go b/pkg/kubelet/sharedcpus/sharedcpus.go new file mode 100644 index 0000000000000..ef4a35c476acb --- /dev/null +++ b/pkg/kubelet/sharedcpus/sharedcpus.go @@ -0,0 +1,87 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sharedcpus + +import ( + "encoding/json" + "errors" + "os" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +const ( + configFileName = "/etc/kubernetes/openshift-workload-mixed-cpus" + sharedCpusResourceName = "workload.openshift.io/enable-shared-cpus" +) + +var ( + config Config + sharedCpusEnabled bool +) + +type Config struct { + sharedCpus `json:"shared_cpus"` +} + +type sharedCpus struct { + // ContainersLimit specifies the number of containers that are allowed to access the shared CPU pool + ContainersLimit int64 `json:"containers_limit"` +} + +func init() { + parseConfig() +} + +func IsEnabled() bool { + return sharedCpusEnabled +} + +func GetResourceName() corev1.ResourceName { + return sharedCpusResourceName +} + +func GetConfig() Config { + return config +} + +func parseConfig() { + b, err := os.ReadFile(configFileName) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return + } + klog.ErrorS(err, "Failed to read configuration file for shared cpus", "fileName", configFileName) + return + } + cfg, err := parseConfigData(b) + if err != nil { + return + } + config = *cfg + sharedCpusEnabled = true +} + +func parseConfigData(data []byte) (*Config, error) { + cfg := &Config{} + err := json.Unmarshal(data, cfg) + if err != nil { + klog.ErrorS(err, "Failed to parse configuration file for shared cpus", "fileContent", string(data)) + } + return cfg, err +} diff --git a/pkg/kubelet/sharedcpus/sharedcpus_test.go b/pkg/kubelet/sharedcpus/sharedcpus_test.go new file mode 100644 index 0000000000000..63e7914f0fff1 --- /dev/null +++ b/pkg/kubelet/sharedcpus/sharedcpus_test.go @@ -0,0 +1,39 @@ +package sharedcpus + +import "testing" + +func TestParseConfigData(t *testing.T) { + testCases := []struct { + data []byte + expectedToBeParsed bool + containerLimitValue int64 + }{ + { + data: []byte(`{ + "shared_cpus": { + "containers_limit": 15 + } + }`), + expectedToBeParsed: true, + containerLimitValue: 15, + }, + { + data: []byte(`{ + "shared_cpus": { + "abc": "25" + } + }`), + expectedToBeParsed: false, + containerLimitValue: 0, + }, + } + for _, tc := range testCases { + cfg, err := parseConfigData(tc.data) + if err != nil && tc.expectedToBeParsed { + t.Errorf("shared cpus data expected to be parsed") + } + if cfg.ContainersLimit != tc.containerLimitValue { + t.Errorf("shared cpus ContainersLimit is different than expected: want: %d; got %d", tc.containerLimitValue, cfg.ContainersLimit) + } + } +} diff --git
a/pkg/probe/http/http.go b/pkg/probe/http/http.go index 20e33da8ed478..d74c6776e000e 100644 --- a/pkg/probe/http/http.go +++ b/pkg/probe/http/http.go @@ -78,7 +78,8 @@ func (pr httpProber) Probe(req *http.Request, timeout time.Duration) (probe.Resu Transport: pr.transport, CheckRedirect: RedirectChecker(pr.followNonLocalRedirects), } - return DoHTTPProbe(req, client) + result, details, _, err := DoHTTPProbe(req, client) + return result, details, err } // GetHTTPInterface is an interface for making HTTP requests, that returns a response and error. @@ -90,13 +91,13 @@ type GetHTTPInterface interface { // If the HTTP response code is successful (i.e. 400 > code >= 200), it returns Success. // If the HTTP response code is unsuccessful or HTTP communication fails, it returns Failure. // This is exported because some other packages may want to do direct HTTP probes. -func DoHTTPProbe(req *http.Request, client GetHTTPInterface) (probe.Result, string, error) { +func DoHTTPProbe(req *http.Request, client GetHTTPInterface) (probe.Result, string, string, error) { url := req.URL headers := req.Header res, err := client.Do(req) if err != nil { // Convert errors into failures to catch timeouts. - return probe.Failure, err.Error(), nil + return probe.Failure, err.Error(), "", nil } defer res.Body.Close() b, err := utilio.ReadAtMost(res.Body, maxRespBodyLength) @@ -104,22 +105,23 @@ func DoHTTPProbe(req *http.Request, client GetHTTPInterface) (probe.Result, stri if err == utilio.ErrLimitReached { klog.V(4).Infof("Non fatal body truncation for %s, Response: %v", url.String(), *res) } else { - return probe.Failure, "", err + return probe.Failure, "", "", err } } body := string(b) if res.StatusCode >= http.StatusOK && res.StatusCode < http.StatusBadRequest { if res.StatusCode >= http.StatusMultipleChoices { // Redirect klog.V(4).Infof("Probe terminated redirects for %s, Response: %v", url.String(), *res) - return probe.Warning, fmt.Sprintf("Probe terminated redirects, Response body: %v", body), nil + return probe.Warning, fmt.Sprintf("Probe terminated redirects, Response body: %v", body), body, nil } klog.V(4).Infof("Probe succeeded for %s, Response: %v", url.String(), *res) - return probe.Success, body, nil + return probe.Success, body, body, nil } klog.V(4).Infof("Probe failed for %s with request headers %v, response body: %v", url.String(), headers, body) // Note: Until https://issue.k8s.io/99425 is addressed, this user-facing failure message must not contain the response body. + // @deads2k recommended we return the body. Slack discussion: https://redhat-internal.slack.com/archives/C04UQLWQAP3/p1679590747021409 failureMsg := fmt.Sprintf("HTTP probe failed with statuscode: %d", res.StatusCode) - return probe.Failure, failureMsg, nil + return probe.Failure, failureMsg, body, nil } // RedirectChecker returns a function that can be used to check HTTP redirects. diff --git a/pkg/probe/http/patch_http.go b/pkg/probe/http/patch_http.go new file mode 100644 index 0000000000000..71648a79f64b1 --- /dev/null +++ b/pkg/probe/http/patch_http.go @@ -0,0 +1,25 @@ +package http + +import ( + "net/http" + "time" + + "k8s.io/kubernetes/pkg/probe" +) + +// DetailedProber is an interface that defines the ProbeForBody function for doing HTTP readiness/liveness checks. +type DetailedProber interface { + ProbeForBody(req *http.Request, timeout time.Duration) (probe.Result, string, string, error) +} + +// ProbeForBody runs an HTTP check and returns the probe result together with the response body.
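+// Unlike Probe, the response body (read up to maxRespBodyLength) is preserved so callers can log it for diagnosis.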
+// returns result, details, body, error +func (pr httpProber) ProbeForBody(req *http.Request, timeout time.Duration) (probe.Result, string, string, error) { + pr.transport.DisableCompression = true // removes Accept-Encoding header + client := &http.Client{ + Timeout: timeout, + Transport: pr.transport, + CheckRedirect: RedirectChecker(pr.followNonLocalRedirects), + } + return DoHTTPProbe(req, client) +} diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 086c4f30bb409..b357be8e856ec 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -1000,6 +1000,21 @@ func (proxier *Proxier) syncProxyRules() { allEndpoints := proxier.endpointsMap[svcName] clusterEndpoints, localEndpoints, allLocallyReachableEndpoints, hasEndpoints := proxy.CategorizeEndpoints(allEndpoints, svcInfo, proxier.nodeLabels) + // Prefer local endpoint for the DNS service. + // Fixes . + // TODO: Delete this once node-level topology is + // implemented and the DNS operator is updated to use it. + if svcPortNameString == "openshift-dns/dns-default:dns" || svcPortNameString == "openshift-dns/dns-default:dns-tcp" { + for _, ep := range clusterEndpoints { + if ep.IsLocal() { + klog.V(4).Infof("Found a local endpoint %q for service %q; preferring the local endpoint and ignoring %d other endpoints", ep.String(), svcPortNameString, len(clusterEndpoints)-1) + clusterEndpoints = []proxy.Endpoint{ep} + allLocallyReachableEndpoints = clusterEndpoints + break + } + } + } + // clusterPolicyChain contains the endpoints used with "Cluster" traffic policy clusterPolicyChain := svcInfo.clusterPolicyChainName usesClusterPolicyChain := len(clusterEndpoints) > 0 && svcInfo.UsesClusterEndpoints() diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go index b42490764a8ce..17a852fcd2a32 100644 --- a/pkg/proxy/iptables/proxier_test.go +++ b/pkg/proxy/iptables/proxier_test.go @@ -2081,6 +2081,173 @@ func TestClusterIPGeneral(t *testing.T) { }) } +func TestOpenShiftDNSHackTCP(t *testing.T) { + ipt := iptablestest.NewFake() + fp := NewFakeProxier(ipt) + svcIP := "172.30.0.10" + svcPort := 53 + podPort := 5353 + svcPortName := proxy.ServicePortName{ + NamespacedName: makeNSN("openshift-dns", "dns-default"), + Port: "dns-tcp", + Protocol: v1.ProtocolTCP, + } + + makeServiceMap(fp, + makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { + svc.Spec.ClusterIP = svcIP + svc.Spec.Ports = []v1.ServicePort{{ + Name: svcPortName.Port, + Port: int32(svcPort), + Protocol: svcPortName.Protocol, + }} + }), + ) + + populateEndpointSlices(fp, + makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { + eps.AddressType = discovery.AddressTypeIPv4 + eps.Endpoints = []discovery.Endpoint{{ + // This endpoint is ignored because it's remote + Addresses: []string{"10.180.0.2"}, + NodeName: ptr.To("node2"), + }, { + Addresses: []string{"10.180.0.1"}, + NodeName: ptr.To(testHostname), + }} + eps.Ports = []discovery.EndpointPort{{ + Name: ptr.To(svcPortName.Port), + Port: ptr.To[int32](int32(podPort)), + Protocol: &svcPortName.Protocol, + }} + }), + ) + + fp.syncProxyRules() + + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ + { + name: "TCP DNS only goes to local endpoint", + sourceIP: "10.0.0.2", + destIP: "172.30.0.10", + destPort: 53, + output: "10.180.0.1:5353", + }, + }) +} + +func TestOpenShiftDNSHackUDP(t *testing.T) { + ipt := iptablestest.NewFake() + fp := NewFakeProxier(ipt) + svcIP := 
"172.30.0.10" + svcPort := 53 + podPort := 5353 + svcPortName := proxy.ServicePortName{ + NamespacedName: makeNSN("openshift-dns", "dns-default"), + Port: "dns", + Protocol: v1.ProtocolUDP, + } + + makeServiceMap(fp, + makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { + svc.Spec.ClusterIP = svcIP + svc.Spec.Ports = []v1.ServicePort{{ + Name: svcPortName.Port, + Port: int32(svcPort), + Protocol: svcPortName.Protocol, + }} + }), + ) + + populateEndpointSlices(fp, + makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { + eps.AddressType = discovery.AddressTypeIPv4 + eps.Endpoints = []discovery.Endpoint{{ + // This endpoint is ignored because it's remote + Addresses: []string{"10.180.0.2"}, + NodeName: ptr.To("node2"), + }, { + Addresses: []string{"10.180.0.1"}, + NodeName: ptr.To(testHostname), + }} + eps.Ports = []discovery.EndpointPort{{ + Name: ptr.To(svcPortName.Port), + Port: ptr.To[int32](int32(podPort)), + Protocol: &svcPortName.Protocol, + }} + }), + ) + + fp.syncProxyRules() + + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ + { + name: "UDP DNS only goes to local endpoint", + sourceIP: "10.0.0.2", + protocol: v1.ProtocolUDP, + destIP: "172.30.0.10", + destPort: 53, + output: "10.180.0.1:5353", + }, + }) +} + +func TestOpenShiftDNSHackFallback(t *testing.T) { + ipt := iptablestest.NewFake() + fp := NewFakeProxier(ipt) + svcIP := "172.30.0.10" + svcPort := 53 + podPort := 5353 + svcPortName := proxy.ServicePortName{ + NamespacedName: makeNSN("openshift-dns", "dns-default"), + Port: "dns", + Protocol: v1.ProtocolUDP, + } + + makeServiceMap(fp, + makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { + svc.Spec.ClusterIP = svcIP + svc.Spec.Ports = []v1.ServicePort{{ + Name: svcPortName.Port, + Port: int32(svcPort), + Protocol: svcPortName.Protocol, + }} + }), + ) + + populateEndpointSlices(fp, + makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { + eps.AddressType = discovery.AddressTypeIPv4 + // Both endpoints are used because neither is local + eps.Endpoints = []discovery.Endpoint{{ + Addresses: []string{"10.180.1.2"}, + NodeName: ptr.To("node2"), + }, { + Addresses: []string{"10.180.2.3"}, + NodeName: ptr.To("node3"), + }} + eps.Ports = []discovery.EndpointPort{{ + Name: ptr.To(svcPortName.Port), + Port: ptr.To[int32](int32(podPort)), + Protocol: &svcPortName.Protocol, + }} + }), + ) + + fp.syncProxyRules() + + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ + { + name: "DNS goes to all endpoints when none are local", + sourceIP: "10.0.0.2", + protocol: v1.ProtocolUDP, + destIP: "172.30.0.10", + destPort: 53, + output: "10.180.1.2:5353, 10.180.2.3:5353", + }, + }) +} + func TestLoadBalancer(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) diff --git a/pkg/quota/v1/install/patch_ignoredresources.go b/pkg/quota/v1/install/patch_ignoredresources.go new file mode 100644 index 0000000000000..bf246aaccb1c7 --- /dev/null +++ b/pkg/quota/v1/install/patch_ignoredresources.go @@ -0,0 +1,33 @@ +package install + +import "k8s.io/apimachinery/pkg/runtime/schema" + +func init() { + newIgnoredResources := map[schema.GroupResource]struct{}{ + {Group: "extensions", Resource: "networkpolicies"}: {}, + {Group: "", Resource: "bindings"}: {}, + {Group: "", Resource: "componentstatuses"}: {}, + {Group: "", Resource: "events"}: {}, + {Group: "authentication.k8s.io", Resource: 
"tokenreviews"}: {}, + {Group: "authorization.k8s.io", Resource: "subjectaccessreviews"}: {}, + {Group: "authorization.k8s.io", Resource: "selfsubjectaccessreviews"}: {}, + {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: {}, + {Group: "authorization.k8s.io", Resource: "selfsubjectrulesreviews"}: {}, + {Group: "authorization.openshift.io", Resource: "selfsubjectaccessreviews"}: {}, + {Group: "authorization.openshift.io", Resource: "subjectaccessreviews"}: {}, + {Group: "authorization.openshift.io", Resource: "localsubjectaccessreviews"}: {}, + {Group: "authorization.openshift.io", Resource: "resourceaccessreviews"}: {}, + {Group: "authorization.openshift.io", Resource: "localresourceaccessreviews"}: {}, + {Group: "authorization.openshift.io", Resource: "selfsubjectrulesreviews"}: {}, + {Group: "authorization.openshift.io", Resource: "subjectrulesreviews"}: {}, + {Group: "authorization.openshift.io", Resource: "roles"}: {}, + {Group: "authorization.openshift.io", Resource: "rolebindings"}: {}, + {Group: "authorization.openshift.io", Resource: "clusterroles"}: {}, + {Group: "authorization.openshift.io", Resource: "clusterrolebindings"}: {}, + {Group: "apiregistration.k8s.io", Resource: "apiservices"}: {}, + {Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"}: {}, + } + for k, v := range newIgnoredResources { + ignoredResources[k] = v + } +} diff --git a/pkg/registry/authorization/selfsubjectaccessreview/rest.go b/pkg/registry/authorization/selfsubjectaccessreview/rest.go index a64a84cabfa40..c2e48ed8f1d7b 100644 --- a/pkg/registry/authorization/selfsubjectaccessreview/rest.go +++ b/pkg/registry/authorization/selfsubjectaccessreview/rest.go @@ -87,6 +87,9 @@ func (r *REST) Create(ctx context.Context, obj runtime.Object, createValidation } } + // when using a scoped token, set the required scopes to perform the self SAR if any is missing + userToCheck = userWithRequiredScopes(userToCheck) + var authorizationAttributes authorizer.AttributesRecord if selfSAR.Spec.ResourceAttributes != nil { authorizationAttributes = authorizationutil.ResourceAttributesFrom(userToCheck, *selfSAR.Spec.ResourceAttributes) diff --git a/pkg/registry/authorization/selfsubjectaccessreview/rest_patch.go b/pkg/registry/authorization/selfsubjectaccessreview/rest_patch.go new file mode 100644 index 0000000000000..1b13327285e89 --- /dev/null +++ b/pkg/registry/authorization/selfsubjectaccessreview/rest_patch.go @@ -0,0 +1,55 @@ +package selfsubjectaccessreview + +import ( + "reflect" + "sort" + + "k8s.io/apiserver/pkg/authentication/user" + + authorizationv1 "github.com/openshift/api/authorization/v1" + authorizationscope "github.com/openshift/apiserver-library-go/pkg/authorization/scope" +) + +func userWithRequiredScopes(userToCheck user.Info) user.Info { + userExtra := userToCheck.GetExtra() + if userExtra == nil || !scopesNeedUserFull(userExtra[authorizationv1.ScopesKey]) { + return userToCheck + } + + userExtraCopy := make(map[string][]string) + for k, v := range userExtra { + userExtraCopy[k] = v + } + userExtraCopy[authorizationv1.ScopesKey] = append(userExtraCopy[authorizationv1.ScopesKey], authorizationscope.UserFull) + + userWithFullScope := &user.DefaultInfo{ + Name: userToCheck.GetName(), + UID: userToCheck.GetUID(), + Groups: userToCheck.GetGroups(), + Extra: userExtraCopy, + } + + return userWithFullScope +} + +// a self-SAR request must be authorized as if it has either the full user's permissions +// or the permissions of the user's role set on the request (if 
applicable) in order +// to be able to perform the access review +func scopesNeedUserFull(scopes []string) bool { + if len(scopes) == 0 { + return false + } + + sort.Strings(scopes) + switch { + case + // all scope slices used here must be sorted + reflect.DeepEqual(scopes, []string{authorizationscope.UserAccessCheck}), + reflect.DeepEqual(scopes, []string{authorizationscope.UserAccessCheck, authorizationscope.UserInfo}), + reflect.DeepEqual(scopes, []string{authorizationscope.UserAccessCheck, authorizationscope.UserListAllProjects}), + reflect.DeepEqual(scopes, []string{authorizationscope.UserAccessCheck, authorizationscope.UserInfo, authorizationscope.UserListAllProjects}): + return true + } + + return false +} diff --git a/pkg/registry/authorization/selfsubjectaccessreview/rest_patch_test.go b/pkg/registry/authorization/selfsubjectaccessreview/rest_patch_test.go new file mode 100644 index 0000000000000..05a13d521311f --- /dev/null +++ b/pkg/registry/authorization/selfsubjectaccessreview/rest_patch_test.go @@ -0,0 +1,55 @@ +package selfsubjectaccessreview + +import ( + "testing" + + authorizationscope "github.com/openshift/apiserver-library-go/pkg/authorization/scope" +) + +func TestScopesNeedUserFull(t *testing.T) { + roleScope := "role:testrole:testns" + tests := []struct { + want bool + scopes []string + }{ + {true, []string{authorizationscope.UserAccessCheck}}, + {true, []string{authorizationscope.UserInfo, authorizationscope.UserAccessCheck}}, + {true, []string{authorizationscope.UserListAllProjects, authorizationscope.UserAccessCheck}}, + {true, []string{authorizationscope.UserListAllProjects, authorizationscope.UserInfo, authorizationscope.UserAccessCheck}}, + {false, nil}, + {false, []string{}}, + {false, []string{authorizationscope.UserInfo}}, + {false, []string{authorizationscope.UserListAllProjects}}, + {false, []string{authorizationscope.UserFull}}, + {false, []string{roleScope}}, + {false, []string{authorizationscope.UserAccessCheck, authorizationscope.UserFull}}, + {false, []string{authorizationscope.UserAccessCheck, roleScope}}, + {false, []string{authorizationscope.UserInfo, authorizationscope.UserListAllProjects}}, + {false, []string{authorizationscope.UserInfo, authorizationscope.UserFull}}, + {false, []string{authorizationscope.UserInfo, roleScope}}, + {false, []string{authorizationscope.UserListAllProjects, authorizationscope.UserFull}}, + {false, []string{authorizationscope.UserListAllProjects, roleScope}}, + {false, []string{authorizationscope.UserFull, roleScope}}, + {false, []string{authorizationscope.UserAccessCheck, authorizationscope.UserInfo, authorizationscope.UserFull}}, + {false, []string{authorizationscope.UserAccessCheck, authorizationscope.UserInfo, roleScope}}, + {false, []string{authorizationscope.UserAccessCheck, authorizationscope.UserListAllProjects, authorizationscope.UserFull}}, + {false, []string{authorizationscope.UserAccessCheck, authorizationscope.UserListAllProjects, roleScope}}, + {false, []string{authorizationscope.UserAccessCheck, authorizationscope.UserFull, roleScope}}, + {false, []string{authorizationscope.UserInfo, authorizationscope.UserListAllProjects, authorizationscope.UserFull}}, + {false, []string{authorizationscope.UserInfo, authorizationscope.UserListAllProjects, roleScope}}, + {false, []string{authorizationscope.UserInfo, authorizationscope.UserFull, roleScope}}, + {false, []string{authorizationscope.UserListAllProjects, authorizationscope.UserFull, roleScope}}, + {false, []string{authorizationscope.UserAccessCheck, 
authorizationscope.UserInfo, authorizationscope.UserListAllProjects, authorizationscope.UserFull}}, + {false, []string{authorizationscope.UserAccessCheck, authorizationscope.UserInfo, authorizationscope.UserListAllProjects, roleScope}}, + {false, []string{authorizationscope.UserAccessCheck, authorizationscope.UserInfo, authorizationscope.UserFull, roleScope}}, + {false, []string{authorizationscope.UserAccessCheck, authorizationscope.UserListAllProjects, authorizationscope.UserFull, roleScope}}, + {false, []string{authorizationscope.UserInfo, authorizationscope.UserListAllProjects, authorizationscope.UserFull, roleScope}}, + {false, []string{authorizationscope.UserAccessCheck, authorizationscope.UserInfo, authorizationscope.UserListAllProjects, authorizationscope.UserFull, roleScope}}, + } + + for _, tt := range tests { + if got := scopesNeedUserFull(tt.scopes); got != tt.want { + t.Errorf("scopes %v; got %v; want %v", tt.scopes, got, tt.want) + } + } +} diff --git a/pkg/registry/authorization/selfsubjectaccessreview/rest_test.go b/pkg/registry/authorization/selfsubjectaccessreview/rest_test.go new file mode 100644 index 0000000000000..31ac1d5c5e91d --- /dev/null +++ b/pkg/registry/authorization/selfsubjectaccessreview/rest_test.go @@ -0,0 +1,136 @@ +package selfsubjectaccessreview + +import ( + "context" + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" + + authorizationv1 "github.com/openshift/api/authorization/v1" + authorizationscope "github.com/openshift/apiserver-library-go/pkg/authorization/scope" + + authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" +) + +type fakeAuthorizer struct { + attrs authorizer.Attributes +} + +func (f *fakeAuthorizer) Authorize(ctx context.Context, attrs authorizer.Attributes) (authorizer.Decision, string, error) { + f.attrs = attrs + return authorizer.DecisionNoOpinion, "", nil +} + +func TestCreate(t *testing.T) { + userNilExtra := &user.DefaultInfo{} + + userNoExtra := &user.DefaultInfo{ + Extra: make(map[string][]string), + } + + userNoScopes := &user.DefaultInfo{ + Extra: map[string][]string{ + "extra": {"ex1", "ex2"}, + }, + } + + userWithScopesNoCheckAccess := &user.DefaultInfo{ + Extra: map[string][]string{ + "extra": {"ex1", "ex2"}, + authorizationv1.ScopesKey: { + authorizationscope.UserInfo, + authorizationscope.UserListAllProjects, + }, + }, + } + + userWithScopesWithCheckAccess := &user.DefaultInfo{ + Extra: map[string][]string{ + "extra": {"ex1", "ex2"}, + authorizationv1.ScopesKey: { + authorizationscope.UserAccessCheck, + authorizationscope.UserInfo, + }, + }, + } + + userWithScopeUserFull := &user.DefaultInfo{ + Extra: map[string][]string{ + "extra": {"ex1", "ex2"}, + authorizationv1.ScopesKey: { + authorizationscope.UserAccessCheck, + authorizationscope.UserInfo, + authorizationscope.UserFull, + }, + }, + } + + userWithRoleScope := &user.DefaultInfo{ + Extra: map[string][]string{ + "extra": {"ex1", "ex2"}, + authorizationv1.ScopesKey: { + authorizationscope.UserAccessCheck, + "role:testrole:testns", + }, + }, + } + + testcases := map[string]struct { + user user.Info + expectedUser user.Info + }{ + "nil extra": { + user: userNilExtra, + expectedUser: userNilExtra, + }, + + "no extra": { + user: userNoExtra, + expectedUser: userNoExtra, + }, + + "no scopes": { + user: userNoScopes, + expectedUser: userNoScopes, + }, 
+ + "scopes exclude user:check-access": { + user: userWithScopesNoCheckAccess, + expectedUser: userWithScopesNoCheckAccess, + }, + + "scopes include user:check-access": { + user: userWithScopesWithCheckAccess, + expectedUser: userWithScopeUserFull, + }, + + "scopes include role scope": { + user: userWithRoleScope, + expectedUser: userWithRoleScope, + }, + } + + for k, tc := range testcases { + auth := &fakeAuthorizer{} + storage := NewREST(auth) + spec := authorizationapi.SelfSubjectAccessReviewSpec{ + NonResourceAttributes: &authorizationapi.NonResourceAttributes{Verb: "get", Path: "/mypath"}, + } + + ctx := genericapirequest.WithUser(genericapirequest.NewContext(), tc.user) + _, err := storage.Create(ctx, &authorizationapi.SelfSubjectAccessReview{Spec: spec}, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}) + if err != nil { + t.Errorf("%s: %v", k, err) + continue + } + + if !reflect.DeepEqual(auth.attrs.GetUser(), tc.expectedUser) { + t.Errorf("%s: expected\n%#v\ngot\n%#v", k, tc.expectedUser, auth.attrs.GetUser()) + } + } +} diff --git a/plugin/pkg/admission/noderestriction/admission.go b/plugin/pkg/admission/noderestriction/admission.go index 81fdfafdf22a0..86265037f9d75 100644 --- a/plugin/pkg/admission/noderestriction/admission.go +++ b/plugin/pkg/admission/noderestriction/admission.go @@ -486,7 +486,7 @@ func (p *Plugin) admitNode(nodeName string, a admission.Attributes) error { // Don't allow a node to register with labels outside the allowed set. // This would allow a node to add or modify its labels in a way that would let it steer privileged workloads to itself. modifiedLabels := getModifiedLabels(node.Labels, nil) - if forbiddenLabels := p.getForbiddenLabels(modifiedLabels); len(forbiddenLabels) > 0 { + if forbiddenLabels := p.getForbiddenLabels(modifiedLabels, a.GetOperation()); len(forbiddenLabels) > 0 { return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to set the following labels: %s", nodeName, strings.Join(forbiddenLabels.List(), ", "))) } } @@ -517,9 +517,10 @@ func (p *Plugin) admitNode(nodeName string, a admission.Attributes) error { // Don't allow a node to update labels outside the allowed set. // This would allow a node to add or modify its labels in a way that would let it steer privileged workloads to itself. modifiedLabels := getModifiedLabels(node.Labels, oldNode.Labels) - if forbiddenUpdateLabels := p.getForbiddenLabels(modifiedLabels); len(forbiddenUpdateLabels) > 0 { + if forbiddenUpdateLabels := p.getForbiddenLabels(modifiedLabels, a.GetOperation()); len(forbiddenUpdateLabels) > 0 { return admission.NewForbidden(a, fmt.Errorf("is not allowed to modify labels: %s", strings.Join(forbiddenUpdateLabels.List(), ", "))) } + } return nil @@ -560,7 +561,7 @@ func getLabelNamespace(key string) string { } // getForbiddenLabels returns the set of labels that may not be added, removed, or modified by the node on create or update. 
-func (p *Plugin) getForbiddenLabels(modifiedLabels sets.String) sets.String { +func (p *Plugin) getForbiddenLabels(modifiedLabels sets.String, admissionOpn admission.Operation) sets.String { if len(modifiedLabels) == 0 { return nil } @@ -575,6 +576,11 @@ func (p *Plugin) getForbiddenLabels(modifiedLabels sets.String) sets.String { // forbid kubelets from setting unknown kubernetes.io and k8s.io labels on update if isKubernetesLabel(label) && !kubeletapis.IsKubeletLabel(label) { // TODO: defer to label policy once available + if admissionOpn == admission.Create { + if kubeletapis.IsForbiddenOpenshiftLabel(label) { + continue + } + } forbiddenLabels.Insert(label) } } diff --git a/plugin/pkg/admission/security/podsecurity/admission.go b/plugin/pkg/admission/security/podsecurity/admission.go index e4b55cb490818..d30dae9a7eb98 100644 --- a/plugin/pkg/admission/security/podsecurity/admission.go +++ b/plugin/pkg/admission/security/podsecurity/admission.go @@ -115,7 +115,7 @@ func newPlugin(reader io.Reader) (*Plugin, error) { Configuration: config, Evaluator: evaluator, Metrics: getDefaultRecorder(), - PodSpecExtractor: podsecurityadmission.DefaultPodSpecExtractor{}, + PodSpecExtractor: SCCMutatingPodSpecExtractorInstance, }, }, nil } diff --git a/plugin/pkg/admission/security/podsecurity/patch_podspecextractor.go b/plugin/pkg/admission/security/podsecurity/patch_podspecextractor.go new file mode 100644 index 0000000000000..fb8b8488a6eb9 --- /dev/null +++ b/plugin/pkg/admission/security/podsecurity/patch_podspecextractor.go @@ -0,0 +1,111 @@ +package podsecurity + +import ( + "context" + "fmt" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/apis/core" + v1 "k8s.io/kubernetes/pkg/apis/core/v1" + saadmission "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount" + podsecurityadmission "k8s.io/pod-security-admission/admission" +) + +type SCCMutatingPodSpecExtractor struct { + sccAdmission admission.MutationInterface + delegate podsecurityadmission.PodSpecExtractor +} + +var SCCMutatingPodSpecExtractorInstance = &SCCMutatingPodSpecExtractor{ + delegate: podsecurityadmission.DefaultPodSpecExtractor{}, +} + +func (s *SCCMutatingPodSpecExtractor) SetSCCAdmission(sccAdmission admission.MutationInterface) { + s.sccAdmission = sccAdmission +} + +func (s *SCCMutatingPodSpecExtractor) HasPodSpec(gr schema.GroupResource) bool { + return s.delegate.HasPodSpec(gr) +} + +func (s *SCCMutatingPodSpecExtractor) ExtractPodSpec(obj runtime.Object) (*metav1.ObjectMeta, *corev1.PodSpec, error) { + if s.sccAdmission == nil { + return s.delegate.ExtractPodSpec(obj) + } + + switch obj := obj.(type) { + case *corev1.Pod: + return s.delegate.ExtractPodSpec(obj) + } + + podTemplateMeta, originalPodSpec, err := s.delegate.ExtractPodSpec(obj) + if err != nil { + return podTemplateMeta, originalPodSpec, err + } + if originalPodSpec == nil { + return nil, nil, nil + } + objectMeta, err := meta.Accessor(obj) + if err != nil { + return podTemplateMeta, originalPodSpec, fmt.Errorf("unable to get metadata for SCC mutation: %w", err) + } + + pod := &corev1.Pod{ + ObjectMeta: *podTemplateMeta.DeepCopy(), + Spec: *originalPodSpec.DeepCopy(), + } + if 
len(pod.Namespace) == 0 { + pod.Namespace = objectMeta.GetNamespace() + } + if len(pod.Name) == 0 { + pod.Name = "pod-for-container-named-" + objectMeta.GetName() + } + if len(pod.Spec.ServiceAccountName) == 0 { + pod.Spec.ServiceAccountName = saadmission.DefaultServiceAccountName + } + internalPod := &core.Pod{} + if err := v1.Convert_v1_Pod_To_core_Pod(pod, internalPod, nil); err != nil { + return nil, nil, err + } + + admissionAttributes := admission.NewAttributesRecord( + internalPod, + nil, + corev1.SchemeGroupVersion.WithKind("Pod"), + pod.Namespace, + pod.Name, + corev1.SchemeGroupVersion.WithResource("pods"), + "", + admission.Create, + nil, + false, + &user.DefaultInfo{ + Name: serviceaccount.MakeUsername(pod.Namespace, pod.Spec.ServiceAccountName), + UID: "", + Groups: append([]string{user.AllAuthenticated}, serviceaccount.MakeGroupNames(pod.Namespace)...), + Extra: nil, + }) + if err := s.sccAdmission.Admit(context.Background(), admissionAttributes, nil); err != nil { + // don't fail the request, just warn if SCC will fail + klog.ErrorS(err, "failed to mutate object for PSA using SCC") + utilruntime.HandleError(fmt.Errorf("failed to mutate object for PSA using SCC: %w", err)) + // TODO remove this failure we're causing when SCC fails, but for now we actually need to see our test fail because that was almost really bad. + return podTemplateMeta, originalPodSpec, nil + } + + if err := v1.Convert_core_Pod_To_v1_Pod(internalPod, pod, nil); err != nil { + return nil, nil, err + } + + return podTemplateMeta, &pod.Spec, nil +} diff --git a/plugin/pkg/admission/serviceaccount/admission.go b/plugin/pkg/admission/serviceaccount/admission.go index 3f4338128e53c..b42c536b685ba 100644 --- a/plugin/pkg/admission/serviceaccount/admission.go +++ b/plugin/pkg/admission/serviceaccount/admission.go @@ -519,6 +519,19 @@ func TokenVolumeSource() *api.ProjectedVolumeSource { }, }, }, + { + ConfigMap: &api.ConfigMapProjection{ + LocalObjectReference: api.LocalObjectReference{ + Name: "openshift-service-ca.crt", + }, + Items: []api.KeyToPath{ + { + Key: "service-ca.crt", + Path: "service-ca.crt", + }, + }, + }, + }, }, } } diff --git a/plugin/pkg/admission/serviceaccount/admission_test.go b/plugin/pkg/admission/serviceaccount/admission_test.go index 01b08da455f47..42b330309a558 100644 --- a/plugin/pkg/admission/serviceaccount/admission_test.go +++ b/plugin/pkg/admission/serviceaccount/admission_test.go @@ -199,6 +199,7 @@ func TestAssignsDefaultServiceAccountAndBoundTokenWithNoSecretTokens(t *testing. 
{ServiceAccountToken: &api.ServiceAccountTokenProjection{ExpirationSeconds: 3607, Path: "token"}}, {ConfigMap: &api.ConfigMapProjection{LocalObjectReference: api.LocalObjectReference{Name: "kube-root-ca.crt"}, Items: []api.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}}}}, {DownwardAPI: &api.DownwardAPIProjection{Items: []api.DownwardAPIVolumeFile{{Path: "namespace", FieldRef: &api.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}}}, + {ConfigMap: &api.ConfigMapProjection{LocalObjectReference: api.LocalObjectReference{Name: "openshift-service-ca.crt"}, Items: []api.KeyToPath{{Key: "service-ca.crt", Path: "service-ca.crt"}}}}, }, DefaultMode: utilpointer.Int32(0644), }, diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go index 5d6e31dec444b..dd066b48dba7b 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go @@ -160,6 +160,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) // resource that is owned by the service and sets blockOwnerDeletion=true in its ownerRef. rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("services/finalizers").RuleOrDie(), rbacv1helpers.NewRule("get", "list", "create", "update", "delete").Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(discoveryGroup).Resources("endpointslices/restricted").RuleOrDie(), eventsRule(), }, }) @@ -176,6 +177,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) // see https://github.com/openshift/kubernetes/blob/8691466059314c3f7d6dcffcbb76d14596ca716c/pkg/controller/endpointslicemirroring/utils.go#L87-L88 rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("endpoints/finalizers").RuleOrDie(), rbacv1helpers.NewRule("get", "list", "create", "update", "delete").Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(discoveryGroup).Resources("endpointslices/restricted").RuleOrDie(), eventsRule(), }, }) @@ -465,6 +467,13 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) }) } + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-ca-cert-publisher"}, + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("create", "update").Groups(legacyGroup).Resources("configmaps").RuleOrDie(), + eventsRule(), + }, + }) addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "validatingadmissionpolicy-status-controller"}, Rules: []rbacv1.PolicyRule{ diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/patch_policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/patch_policy.go new file mode 100644 index 0000000000000..8f91d44c9c83c --- /dev/null +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/patch_policy.go @@ -0,0 +1,65 @@ +package bootstrappolicy + +import ( + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" +) + +var ClusterRoles = clusterRoles + +func OpenshiftClusterRoles() []rbacv1.ClusterRole { + const ( + // These are valid under the "nodes" resource + NodeMetricsSubresource = "metrics" + NodeStatsSubresource = "stats" + NodeSpecSubresource = "spec" 
+ NodeLogSubresource = "log" + ) + + roles := clusterRoles() + roles = append(roles, []rbacv1.ClusterRole{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "system:node-admin", + }, + Rules: []rbacv1.PolicyRule{ + // Allow read-only access to the API objects + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(), + // Allow all API calls to the nodes + rbacv1helpers.NewRule("proxy").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("*").Groups(legacyGroup).Resources("nodes/proxy", "nodes/"+NodeMetricsSubresource, "nodes/"+NodeSpecSubresource, "nodes/"+NodeStatsSubresource, "nodes/"+NodeLogSubresource).RuleOrDie(), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "system:node-reader", + }, + Rules: []rbacv1.PolicyRule{ + // Allow read-only access to the API objects + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(), + // Allow read access to node metrics + rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("nodes/"+NodeMetricsSubresource, "nodes/"+NodeSpecSubresource).RuleOrDie(), + // Allow read access to stats + // Node stats requests are submitted as POSTs. These creates are non-mutating + rbacv1helpers.NewRule("get", "create").Groups(legacyGroup).Resources("nodes/" + NodeStatsSubresource).RuleOrDie(), + // TODO: expose other things like /healthz on the node once we figure out non-resource URL policy across systems + }, + }, + }...) + + addClusterRoleLabel(roles) + return roles +} + +var ClusterRoleBindings = clusterRoleBindings + +func OpenshiftClusterRoleBindings() []rbacv1.ClusterRoleBinding { + bindings := clusterRoleBindings() + bindings = append(bindings, []rbacv1.ClusterRoleBinding{ + rbacv1helpers.NewClusterBinding("system:node-admin").Users("system:master", "system:kube-apiserver").Groups("system:node-admins").BindingOrDie(), + }...) + + addClusterRoleBindingLabel(bindings) + return bindings +} diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index 947d043de46fb..7aac02d01012d 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -193,8 +193,8 @@ func NodeRules() []rbacv1.PolicyRule { return nodePolicyRules } -// ClusterRoles returns the cluster roles to bootstrap an API server with -func ClusterRoles() []rbacv1.ClusterRole { +// clusterRoles returns the cluster roles to bootstrap an API server with +func clusterRoles() []rbacv1.ClusterRole { monitoringRules := []rbacv1.PolicyRule{ rbacv1helpers.NewRule("get").URLs( "/metrics", "/metrics/slis", @@ -250,6 +250,15 @@ func ClusterRoles() []rbacv1.ClusterRole { } roles = append(roles, []rbacv1.ClusterRole{ + { + // a role which provides unauthenticated access. 
+ ObjectMeta: metav1.ObjectMeta{Name: "system:openshift:public-info-viewer"}, + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get").URLs( + "/.well-known", "/.well-known/*", + ).RuleOrDie(), + }, + }, { // a role which provides minimal resource access to allow a "normal" user to learn information about themselves ObjectMeta: metav1.ObjectMeta{Name: "system:basic-user"}, @@ -316,7 +325,7 @@ func ClusterRoles() []rbacv1.ClusterRole { rbacv1helpers.NewRule(Write...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(), rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(), rbacv1helpers.NewRule(Write...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", - "services", "services/proxy", "persistentvolumeclaims", "configmaps", "secrets", "events").RuleOrDie(), + "services", "services/proxy", "endpoints", "persistentvolumeclaims", "configmaps", "secrets", "events").RuleOrDie(), rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie(), rbacv1helpers.NewRule(Write...).Groups(appsGroup).Resources( @@ -630,13 +639,14 @@ func ClusterRoles() []rbacv1.ClusterRole { const systemNodeRoleName = "system:node" // ClusterRoleBindings return default rolebindings to the default roles -func ClusterRoleBindings() []rbacv1.ClusterRoleBinding { +func clusterRoleBindings() []rbacv1.ClusterRoleBinding { rolebindings := []rbacv1.ClusterRoleBinding{ rbacv1helpers.NewClusterBinding("cluster-admin").Groups(user.SystemPrivilegedGroup).BindingOrDie(), rbacv1helpers.NewClusterBinding("system:monitoring").Groups(user.MonitoringGroup).BindingOrDie(), rbacv1helpers.NewClusterBinding("system:discovery").Groups(user.AllAuthenticated).BindingOrDie(), rbacv1helpers.NewClusterBinding("system:basic-user").Groups(user.AllAuthenticated).BindingOrDie(), rbacv1helpers.NewClusterBinding("system:public-info-viewer").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(), + rbacv1helpers.NewClusterBinding("system:openshift:public-info-viewer").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(), rbacv1helpers.NewClusterBinding("system:node-proxier").Users(user.KubeProxy).BindingOrDie(), rbacv1helpers.NewClusterBinding("system:kube-controller-manager").Users(user.KubeControllerManager).BindingOrDie(), rbacv1helpers.NewClusterBinding("system:kube-dns").SAs("kube-system", "kube-dns").BindingOrDie(), diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-role-bindings.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-role-bindings.yaml index 6edfde1ba4f02..8c09f5a9aa9a3 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-role-bindings.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-role-bindings.yaml @@ -149,6 +149,26 @@ items: - apiGroup: rbac.authorization.k8s.io kind: User name: system:kube-proxy +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:openshift:public-info-viewer + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:openshift:public-info-viewer + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + - apiGroup: rbac.authorization.k8s.io + 
kind: Group + name: system:unauthenticated - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index cfb27005f8559..95b0a116c977d 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -134,6 +134,7 @@ items: - "" resources: - configmaps + - endpoints - events - persistentvolumeclaims - replicationcontrollers @@ -1206,6 +1207,21 @@ items: verbs: - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:openshift:public-info-viewer + rules: + - nonResourceURLs: + - /.well-known + - /.well-known/* + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml index 5b7cf3d4644b4..5e3521686e6da 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml @@ -476,6 +476,23 @@ items: - kind: ServiceAccount name: service-account-controller namespace: kube-system +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:controller:service-ca-cert-publisher + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:controller:service-ca-cert-publisher + subjects: + - kind: ServiceAccount + name: service-ca-cert-publisher + namespace: kube-system - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml index c3c941a0b9be0..279e91be8d6c2 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml @@ -517,6 +517,12 @@ items: - get - list - update + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices/restricted + verbs: + - create - apiGroups: - "" - events.k8s.io @@ -567,6 +573,12 @@ items: - get - list - update + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices/restricted + verbs: + - create - apiGroups: - "" - events.k8s.io @@ -1327,6 +1339,32 @@ items: - create - patch - update +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:controller:service-ca-cert-publisher + rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - update + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: 
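
Reviewer note: the bootstrap-policy carries above introduce the unauthenticated system:openshift:public-info-viewer role and route the default role lists through OpenshiftClusterRoles() and OpenshiftClusterRoleBindings(). The following is a minimal sketch only, not part of this patch; the test name and its placement in the bootstrappolicy package are assumptions. A check along these lines would confirm the new role is actually wired into the OpenShift role set:

package bootstrappolicy

import "testing"

// TestPublicInfoViewerWiring is a hypothetical sketch: it verifies that
// OpenshiftClusterRoles() carries the new unauthenticated role and that
// the role grants access to the /.well-known non-resource URLs.
func TestPublicInfoViewerWiring(t *testing.T) {
	for _, role := range OpenshiftClusterRoles() {
		if role.Name != "system:openshift:public-info-viewer" {
			continue
		}
		for _, rule := range role.Rules {
			for _, url := range rule.NonResourceURLs {
				if url == "/.well-known/*" {
					return // expected rule found; nothing further to check in this sketch
				}
			}
		}
		t.Fatal("public-info-viewer role exists but lacks the /.well-known rule")
	}
	t.Fatal("system:openshift:public-info-viewer missing from OpenshiftClusterRoles()")
}

The same pattern would extend to the binding side via OpenshiftClusterRoleBindings(), mirroring the expectations encoded in the cluster-role-bindings.yaml testdata above.
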
diff --git a/staging/src/k8s.io/api/go.mod b/staging/src/k8s.io/api/go.mod index 31bc21defa6a6..7f565e728ae37 100644 --- a/staging/src/k8s.io/api/go.mod +++ b/staging/src/k8s.io/api/go.mod @@ -39,4 +39,7 @@ require ( sigs.k8s.io/yaml v1.4.0 // indirect ) -replace k8s.io/apimachinery => ../apimachinery +replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + k8s.io/apimachinery => ../apimachinery +) diff --git a/staging/src/k8s.io/api/go.sum b/staging/src/k8s.io/api/go.sum index c868125667cef..481e483765278 100644 --- a/staging/src/k8s.io/api/go.sum +++ b/staging/src/k8s.io/api/go.sum @@ -45,8 +45,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.mod b/staging/src/k8s.io/apiextensions-apiserver/go.mod index b970ec9f4ea3c..b684d8d35cb3e 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.mod +++ b/staging/src/k8s.io/apiextensions-apiserver/go.mod @@ -17,6 +17,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/google/gofuzz v1.2.0 github.com/google/uuid v1.6.0 + github.com/openshift/api v0.0.0-20241212053709-6b333900129e github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 @@ -24,15 +25,15 @@ require ( go.etcd.io/etcd/client/v3 v3.5.16 go.opentelemetry.io/otel v1.28.0 go.opentelemetry.io/otel/trace v1.28.0 - google.golang.org/grpc v1.65.0 + google.golang.org/grpc v1.67.0 google.golang.org/protobuf v1.35.1 gopkg.in/evanphx/json-patch.v4 v4.12.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 + k8s.io/client-go v0.32.0 k8s.io/code-generator v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/component-base v0.32.0 k8s.io/klog/v2 v2.130.1 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 @@ -79,6 +80,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.1 // indirect @@ -104,6 +106,7 @@ require ( go.opentelemetry.io/otel/metric v1.28.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect + 
go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect @@ -118,17 +121,21 @@ require ( golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect - k8s.io/kms v0.0.0 // indirect + k8s.io/kms v0.32.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver @@ -136,4 +143,5 @@ replace ( k8s.io/code-generator => ../code-generator k8s.io/component-base => ../component-base k8s.io/kms => ../kms + k8s.io/kube-aggregator => ../kube-aggregator ) diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum index d142701e0b9dd..3cb9f3bdf5b41 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.sum +++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum @@ -28,7 +28,7 @@ cloud.google.com/go/cloudbuild v1.15.0/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhV cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/contactcenterinsights v1.12.1/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= cloud.google.com/go/container v1.29.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= @@ -120,9 +120,11 @@ cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsd cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o= cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/NYTimes/gziphandler v1.1.1 
h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -144,7 +146,7 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= @@ -159,6 +161,14 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e h1:VtQff4aOjCdx31u6zrt9hPzFx2Ullu1yep4x8bqrRqg= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= @@ -166,18 +176,23 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
-github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -204,7 +219,7 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -212,6 +227,12 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod 
h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= @@ -231,6 +252,7 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAx github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -243,6 +265,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -280,19 +304,27 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= 
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -302,6 +334,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -378,6 +411,7 @@ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVf go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -475,18 +509,18 @@ google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto 
v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -519,6 +553,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index 55d31fc8eda29..ecd70b1c5ca07 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -807,7 +807,7 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid 
types.UID, name string) (*crd utilruntime.HandleError(err) return nil, fmt.Errorf("the server could not properly serve the CR columns") } - table, err := tableconvertor.New(columns) + table, err := tableconvertor.New(columns, schema.GroupVersionKind{crd.Spec.Group, v.Name, crd.Spec.Names.Kind}) if err != nil { klog.V(2).Infof("The CRD for %v has an invalid printer specification, falling back to default printing: %v", kind, err) } @@ -959,7 +959,7 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd if err != nil { return nil, fmt.Errorf("the server could not properly serve the CR scale subresource columns %w", err) } - scaleTable, _ := tableconvertor.New(scaleColumns) + scaleTable, _ := tableconvertor.New(scaleColumns, schema.GroupVersionKind{crd.Spec.Group, v.Name, crd.Spec.Names.Kind}) // override scale subresource values // shallow copy diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go index a0c364f1b3f0c..2ccdea5233f28 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go @@ -90,7 +90,7 @@ func newStorage(t *testing.T) (customresource.CustomResourceStorage, *etcd3testi {Name: "Float64", Type: "number", JSONPath: ".spec.float64"}, {Name: "Bool", Type: "boolean", JSONPath: ".spec.bool"}, } - table, _ := tableconvertor.New(headers) + table, _ := tableconvertor.New(headers, schema.GroupVersionKind{Group: "mygroup.example.com", Version: "v1beta1", Kind: "NoxuItemList"}) storage := customresource.NewStorage( groupResource, diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/patch_clusteroperators.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/patch_clusteroperators.go new file mode 100644 index 0000000000000..9538eb79a8af1 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/patch_clusteroperators.go @@ -0,0 +1,94 @@ +package tableconvertor + +import ( + "encoding/json" + "io" + "reflect" + + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/registry/rest" +) + +var clusterOperatorGVK = schema.GroupVersionKind{configv1.GroupName, "v1", "ClusterOperator"} + +func withClusterOperatorColumns(c *convertor, gvk schema.GroupVersionKind) rest.TableConvertor { + if gvk != clusterOperatorGVK { + return c + } + + c.headers = append(c.headers, metav1.TableColumnDefinition{ + Name: "Message", + Type: "string", + Description: "A message describing the status of the operator", + Priority: 0, + }) + c.additionalColumns = append(c.additionalColumns, clusterOperatorConditionMessage{}) + + return c +} + +type clusterOperatorConditionMessage struct { +} + +func (c clusterOperatorConditionMessage) FindResults(data interface{}) ([][]reflect.Value, error) { + obj := data.(map[string]interface{}) + unstructuredConds, _, _ := unstructured.NestedFieldNoCopy(obj, "status", "conditions") + var conds []configv1.ClusterOperatorStatusCondition + bs, err := json.Marshal(unstructuredConds) + if err != nil { + return nil, err + } + if err := json.Unmarshal(bs, &conds); err != nil { + return nil, err + } + + var available, 
degraded, progressing *configv1.ClusterOperatorStatusCondition + for i := range conds { + cond := &conds[i] + switch { + case cond.Type == configv1.OperatorAvailable && cond.Status == configv1.ConditionFalse: + available = cond + case cond.Type == configv1.OperatorDegraded && cond.Status == configv1.ConditionTrue: + degraded = cond + case cond.Type == configv1.OperatorProgressing && cond.Status == configv1.ConditionTrue: + progressing = cond + } + } + + mostCritical := progressing + if degraded != nil { + mostCritical = degraded + } + if available != nil { + mostCritical = available + } + + if mostCritical != nil { + if len(mostCritical.Message) > 0 { + return [][]reflect.Value{{reflect.ValueOf(mostCritical.Message)}}, nil + } + if len(mostCritical.Reason) > 0 { + return [][]reflect.Value{{reflect.ValueOf(mostCritical.Reason)}}, nil + } + } + + return nil, nil +} + +func (c clusterOperatorConditionMessage) PrintResults(wr io.Writer, results []reflect.Value) error { + first := true + for _, r := range results { + if !first { + wr.Write([]byte("; ")) // should never happen as we only return one result + } + if _, err := wr.Write([]byte(r.String())); err != nil { + return err + } + first = false + } + + return nil +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor.go index 8d933ca953d8d..8828e5ed6d115 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor.go @@ -29,7 +29,8 @@ import ( metatable "k8s.io/apimachinery/pkg/api/meta/table" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/client-go/util/jsonpath" ) @@ -38,7 +39,7 @@ var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() // New creates a new table convertor for the provided CRD column definition. If the printer definition cannot be parsed, // error will be returned along with a default table convertor. 
-func New(crdColumns []apiextensionsv1.CustomResourceColumnDefinition) (rest.TableConvertor, error) { +func New(crdColumns []apiextensionsv1.CustomResourceColumnDefinition, gvk schema.GroupVersionKind) (rest.TableConvertor, error) { headers := []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: swaggerMetadataDescriptions["name"]}, } @@ -68,7 +69,12 @@ func New(crdColumns []apiextensionsv1.CustomResourceColumnDefinition) (rest.Tabl }) } - return c, nil + return withClusterOperatorColumns(c, gvk), nil +} + +type column interface { + FindResults(data interface{}) ([][]reflect.Value, error) + PrintResults(wr io.Writer, results []reflect.Value) error } type columnPrinter interface { diff --git a/staging/src/k8s.io/apimachinery/go.mod b/staging/src/k8s.io/apimachinery/go.mod index b55f733ec0d30..76c6b6490487d 100644 --- a/staging/src/k8s.io/apimachinery/go.mod +++ b/staging/src/k8s.io/apimachinery/go.mod @@ -20,7 +20,7 @@ require ( github.com/google/uuid v1.6.0 github.com/moby/spdystream v0.5.0 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f - github.com/onsi/ginkgo/v2 v2.21.0 + github.com/onsi/ginkgo/v2 v2.20.2 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 golang.org/x/net v0.30.0 @@ -57,3 +57,5 @@ require ( google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) + +replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 diff --git a/staging/src/k8s.io/apimachinery/go.sum b/staging/src/k8s.io/apimachinery/go.sum index 5bc9679b3c2ba..5f4a1a13e0494 100644 --- a/staging/src/k8s.io/apimachinery/go.sum +++ b/staging/src/k8s.io/apimachinery/go.sum @@ -65,10 +65,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/staging/src/k8s.io/apimachinery/pkg/util/managedfields/patch.go b/staging/src/k8s.io/apimachinery/pkg/util/managedfields/patch.go new file mode 100644 index 0000000000000..ae77235ae18af --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/util/managedfields/patch.go @@ -0,0 +1,15 @@ +package managedfields + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/managedfields/internal" +) + +// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with 
each operation. +type ManagedInterface = internal.ManagedInterface + +// DecodeManagedFields converts ManagedFields from the wire format (api format) +// to the format used by sigs.k8s.io/structured-merge-diff +func DecodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (ManagedInterface, error) { + return internal.DecodeManagedFields(encodedManagedFields) +} diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod index c3a4062671c9e..7f4adca78c4fa 100644 --- a/staging/src/k8s.io/apiserver/go.mod +++ b/staging/src/k8s.io/apiserver/go.mod @@ -27,6 +27,7 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f + github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 go.etcd.io/etcd/api/v3 v3.5.16 @@ -40,24 +41,25 @@ require ( go.opentelemetry.io/otel/metric v1.28.0 go.opentelemetry.io/otel/sdk v1.28.0 go.opentelemetry.io/otel/trace v1.28.0 + go.uber.org/atomic v1.7.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.28.0 golang.org/x/net v0.30.0 golang.org/x/sync v0.8.0 golang.org/x/sys v0.26.0 golang.org/x/time v0.7.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 - google.golang.org/grpc v1.65.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f + google.golang.org/grpc v1.67.0 google.golang.org/protobuf v1.35.1 gopkg.in/evanphx/json-patch.v4 v4.12.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/square/go-jose.v2 v2.6.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/component-base v0.32.0 k8s.io/klog/v2 v2.130.1 - k8s.io/kms v0.0.0 + k8s.io/kms v0.32.0 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 @@ -121,15 +123,21 @@ require ( golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api + k8s.io/apiextensions-apiserver => ../apiextensions-apiserver k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go k8s.io/component-base => ../component-base k8s.io/kms => ../kms + k8s.io/kube-aggregator => ../kube-aggregator ) diff --git a/staging/src/k8s.io/apiserver/go.sum b/staging/src/k8s.io/apiserver/go.sum index 11a3e430419b1..4c10cd278cb7f 100644 --- a/staging/src/k8s.io/apiserver/go.sum +++ b/staging/src/k8s.io/apiserver/go.sum @@ -28,7 +28,7 @@ cloud.google.com/go/cloudbuild v1.15.0/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhV 
cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/contactcenterinsights v1.12.1/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= cloud.google.com/go/container v1.29.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= @@ -120,9 +120,11 @@ cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsd cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o= cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -144,7 +146,7 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= @@ -160,6 +162,13 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= 
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= @@ -167,18 +176,23 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -205,7 +219,7 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 
v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -213,6 +227,12 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= @@ -232,6 +252,7 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAx github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -244,6 +265,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -281,20 +304,28 @@ github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -304,6 +335,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/robfig/cron v1.2.0/go.mod 
h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -380,6 +412,7 @@ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVf go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -475,18 +508,18 @@ google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -519,6 +552,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index 936a95e45cc15..cec6769c65dfc 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -54,7 +54,16 @@ const ( // Register registers a plugin func Register(plugins *admission.Plugins) { plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { - return NewLifecycle(sets.NewString(metav1.NamespaceDefault, metav1.NamespaceSystem, metav1.NamespacePublic)) + return NewLifecycle(sets.NewString(metav1.NamespaceDefault, metav1.NamespaceSystem, metav1.NamespacePublic, + // user specified configuration that cannot be rebuilt + "openshift-config", + // cluster generated configuration that cannot be rebuilt (etcd encryption keys) + "openshift-config-managed", + // the CVO which is the root we use to rebuild all the rest + "openshift-cluster-version", + // contains a namespaced list of all nodes in the cluster (yeah, weird. they do it for multi-tenant management I think?) 
+ "openshift-machine-api", + )) }) } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 0c7a3f6b92c37..af1815277eb8d 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -35,7 +35,6 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilsets "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/audit" - "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/endpoints/responsewriter" compbasemetrics "k8s.io/component-base/metrics" @@ -82,7 +81,7 @@ var ( Help: "Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, and HTTP response code.", StabilityLevel: compbasemetrics.STABLE, }, - []string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component", "code"}, + []string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component", "code", "system_client"}, ) longRunningRequestsGauge = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ @@ -501,9 +500,9 @@ func RecordDroppedRequest(req *http.Request, requestInfo *request.RequestInfo, c reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), "", req, requestInfo) if requestInfo.IsResourceRequest { - requestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(http.StatusTooManyRequests)).Inc() + requestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(http.StatusTooManyRequests), "").Inc() } else { - requestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, "", "", "", requestInfo.Subresource, scope, component, codeToString(http.StatusTooManyRequests)).Inc() + requestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, "", "", "", requestInfo.Subresource, scope, component, codeToString(http.StatusTooManyRequests), "").Inc() } } @@ -582,12 +581,19 @@ func MonitorRequest(req *http.Request, verb, group, version, resource, subresour dryRun := cleanDryRun(req.URL) elapsedSeconds := elapsed.Seconds() - requestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, codeToString(httpCode)).Inc() - // MonitorRequest happens after authentication, so we can trust the username given by the request - info, ok := request.UserFrom(req.Context()) - if ok && info.GetName() == user.APIServerUser { - apiSelfRequestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, resource, subresource).Inc() + + systemClient := "" + if uas := strings.SplitN(req.UserAgent(), "/", 2); len(uas) > 1 { + switch uas[0] { + case "kube-apiserver": + apiSelfRequestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, resource, subresource).Inc() + fallthrough + case "kube-controller-manager", "kube-scheduler", "cluster-policy-controller": + systemClient = uas[0] + } } + requestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, codeToString(httpCode), systemClient).Inc() + if deprecated { 
deprecatedRequestGauge.WithContext(req.Context()).WithLabelValues(group, version, resource, subresource, removedRelease).Set(1) audit.AddAuditAnnotation(req.Context(), deprecatedAnnotationKey, "true") diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go index d640dd4c62c5a..e8cbcd17afe37 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go @@ -398,7 +398,7 @@ func TestRecordDroppedRequests(t *testing.T) { want: ` # HELP apiserver_request_total [STABLE] Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, and HTTP response code. # TYPE apiserver_request_total counter - apiserver_request_total{code="429",component="apiserver",dry_run="",group="",resource="pods",scope="cluster",subresource="",verb="LIST",version="v1"} 1 + apiserver_request_total{code="429",component="apiserver",dry_run="",group="",resource="pods",scope="cluster",subresource="",system_client="",verb="LIST",version="v1"} 1 `, }, { @@ -420,7 +420,7 @@ func TestRecordDroppedRequests(t *testing.T) { want: ` # HELP apiserver_request_total [STABLE] Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, and HTTP response code. # TYPE apiserver_request_total counter - apiserver_request_total{code="429",component="apiserver",dry_run="",group="",resource="pods",scope="resource",subresource="",verb="POST",version="v1"} 1 + apiserver_request_total{code="429",component="apiserver",dry_run="",group="",resource="pods",scope="resource",subresource="",system_client="",verb="POST",version="v1"} 1 `, }, { @@ -445,7 +445,7 @@ func TestRecordDroppedRequests(t *testing.T) { want: ` # HELP apiserver_request_total [STABLE] Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, and HTTP response code. 
# TYPE apiserver_request_total counter - apiserver_request_total{code="429",component="apiserver",dry_run="All",group="batch",resource="jobs",scope="resource",subresource="status",verb="PATCH",version="v1"} 1 + apiserver_request_total{code="429",component="apiserver",dry_run="All",group="batch",resource="jobs",scope="resource",subresource="status",system_client="",verb="PATCH",version="v1"} 1 `, }, } diff --git a/staging/src/k8s.io/apiserver/pkg/features/kube_features.go b/staging/src/k8s.io/apiserver/pkg/features/kube_features.go index c23343346e461..bbba688fbb80d 100644 --- a/staging/src/k8s.io/apiserver/pkg/features/kube_features.go +++ b/staging/src/k8s.io/apiserver/pkg/features/kube_features.go @@ -371,7 +371,7 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate }, ResilientWatchCacheInitialization: { - {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, }, RetryGenerateName: { diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index ee037aefed2d0..f58be04f5f8f0 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -72,6 +72,8 @@ import ( utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/component-base/featuregate" "k8s.io/component-base/logs" @@ -278,6 +280,9 @@ type Config struct { // rejected with a 429 status code and a 'Retry-After' response. ShutdownSendRetryAfter bool + // EventSink receives events about the life cycle of the API server, e.g. readiness, serving, signals and termination. + EventSink EventSink + //=========================================================================== // values below here are targets for removal //=========================================================================== @@ -316,6 +321,18 @@ type Config struct { // This grace period is orthogonal to other grace periods, and // it is not overridden by any other grace period. ShutdownWatchTerminationGracePeriod time.Duration + + // SendRetryAfterWhileNotReadyOnce, if enabled, the apiserver will + // reject all incoming requests with a 503 status code and a + // 'Retry-After' response header until the apiserver has fully + // initialized, except for requests from a designated debugger group. + // This option ensures that the system stays consistent even when + // requests are received before the server has been initialized. + // In particular, it prevents child deletion in case of GC or/and + // orphaned content in case of the namespaces controller. + // NOTE: this option is applicable to Microshift only, + // this should never be enabled for OCP. + SendRetryAfterWhileNotReadyOnce bool } type RecommendedConfig struct { @@ -686,6 +703,11 @@ func (c *Config) ShutdownInitiatedNotify() <-chan struct{} { return c.lifecycleSignals.ShutdownInitiated.Signaled() } +// HasBeenReadySignal exposes a server's lifecycle signal which is signaled when the readyz endpoint succeeds for the first time. 
+func (c *Config) HasBeenReadySignal() <-chan struct{} { + return c.lifecycleSignals.HasBeenReady.Signaled() +} + // Complete fills in any fields not set that are required to have valid data and can be derived // from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver. func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig { @@ -714,6 +736,10 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo c.DiscoveryAddresses = discovery.DefaultAddresses{DefaultAddress: c.ExternalAddress} } + if c.EventSink == nil { + c.EventSink = nullEventSink{} + } + AuthorizeClientBearerToken(c.LoopbackClientConfig, &c.Authentication, &c.Authorization) if c.RequestInfoResolver == nil { @@ -741,6 +767,22 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo // Complete fills in any fields not set that are required to have valid data and can be derived // from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver. func (c *RecommendedConfig) Complete() CompletedConfig { + if c.ClientConfig != nil { + ref, err := eventReference() + if err != nil { + klog.Warningf("Failed to derive event reference, won't create events: %v", err) + c.EventSink = nullEventSink{} + } else { + ns := ref.Namespace + if len(ns) == 0 { + ns = "default" + } + c.EventSink = clientEventSink{ + &v1.EventSinkImpl{Interface: kubernetes.NewForConfigOrDie(c.ClientConfig).CoreV1().Events(ns)}, + } + } + } + return c.Config.Complete(c.SharedInformerFactory) } @@ -843,7 +885,19 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G FeatureGate: c.FeatureGate, muxAndDiscoveryCompleteSignals: map[string]<-chan struct{}{}, + + OpenShiftGenericAPIServerPatch: OpenShiftGenericAPIServerPatch{ + eventSink: c.EventSink, + }, + } + + ref, err := eventReference() + if err != nil { + klog.Warningf("Failed to derive event reference, won't create events: %v", err) + s.OpenShiftGenericAPIServerPatch.eventSink = nullEventSink{} } + s.RegisterDestroyFunc(c.EventSink.Destroy) + s.eventRef = ref if c.FeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) { manager := c.AggregatedDiscoveryGroupManager @@ -1019,6 +1073,10 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.LongRunningFunc) } + if c.SendRetryAfterWhileNotReadyOnce { + handler = genericfilters.WithNotReady(handler, c.lifecycleSignals.HasBeenReady.Signaled()) + } + handler = filterlatency.TrackCompleted(handler) handler = genericapifilters.WithImpersonation(handler, c.Authorization.Authorizer, c.Serializer) handler = filterlatency.TrackStarted(handler, c.TracerProvider, "impersonation") @@ -1027,6 +1085,8 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { handler = genericapifilters.WithAudit(handler, c.AuditBackend, c.AuditPolicyRuleEvaluator, c.LongRunningFunc) handler = filterlatency.TrackStarted(handler, c.TracerProvider, "audit") + handler = genericfilters.WithStartupEarlyAnnotation(handler, c.lifecycleSignals.HasBeenReady) + failedHandler := genericapifilters.Unauthorized(c.Serializer) failedHandler = genericapifilters.WithFailedAuthenticationAudit(failedHandler, c.AuditBackend, c.AuditPolicyRuleEvaluator) @@ -1048,6 +1108,8 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { handler = 
genericapifilters.WithRequestDeadline(handler, c.AuditBackend, c.AuditPolicyRuleEvaluator, c.LongRunningFunc, c.Serializer, c.RequestTimeout) handler = genericfilters.WithWaitGroup(handler, c.LongRunningFunc, c.NonLongRunningRequestWaitGroup) + handler = WithNonReadyRequestLogging(handler, c.lifecycleSignals.HasBeenReady) + handler = WithLateConnectionFilter(handler) if c.ShutdownWatchTerminationGracePeriod > 0 { handler = genericfilters.WithWatchTerminationDuringShutdown(handler, c.lifecycleSignals, c.WatchRequestWaitGroup) } @@ -1059,7 +1121,9 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { if c.ShutdownSendRetryAfter { handler = genericfilters.WithRetryAfter(handler, c.lifecycleSignals.NotAcceptingNewRequest.Signaled()) } - handler = genericfilters.WithHTTPLogging(handler) + handler = genericfilters.WithOptInRetryAfter(handler, c.newServerFullyInitializedFunc()) + handler = genericfilters.WithShutdownResponseHeader(handler, c.lifecycleSignals.ShutdownInitiated, c.ShutdownDelayDuration, c.APIServerID) + handler = genericfilters.WithHTTPLogging(handler, c.newIsTerminatingFunc()) if c.FeatureGate.Enabled(genericfeatures.APIServerTracing) { handler = genericapifilters.WithTracing(handler, c.TracerProvider) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/patch_optin_retry.go b/staging/src/k8s.io/apiserver/pkg/server/filters/patch_optin_retry.go new file mode 100644 index 0000000000000..88cdaabbeaf17 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/patch_optin_retry.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filters + +import ( + "net/http" +) + +func WithOptInRetryAfter(handler http.Handler, initializedFn func() bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var retryAfter bool + if value := req.Header.Get("X-OpenShift-Internal-If-Not-Ready"); value == "reject" { + // the caller opted in for the request to be rejected if the server is not ready + retryAfter = !initializedFn() + } + + if !retryAfter { + handler.ServeHTTP(w, req) + return + } + + // Return a 429 status asking the client to try again after 5 seconds + w.Header().Set("Retry-After", "5") + http.Error(w, "The apiserver hasn't been fully initialized yet, please try again later.", http.StatusTooManyRequests) + }) +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/timeout_test.go b/staging/src/k8s.io/apiserver/pkg/server/filters/timeout_test.go index ffb15926bd429..4f107450759d4 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/timeout_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/timeout_test.go @@ -355,6 +355,9 @@ func TestTimeoutWithLogging(t *testing.T) { }, ), ), + func() bool { + return false + }, ), ) defer ts.Close() diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/with_early_late_annotations.go b/staging/src/k8s.io/apiserver/pkg/server/filters/with_early_late_annotations.go new file mode 100644 index 0000000000000..6166b8843265e --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/with_early_late_annotations.go @@ -0,0 +1,167 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "fmt" + "net" + "net/http" + "strings" + "time" + + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/request" + clockutils "k8s.io/utils/clock" + netutils "k8s.io/utils/net" +) + +type lifecycleEvent interface { + // Name returns the name of the signal, useful for logging. + Name() string + + // Signaled returns a channel that is closed when the underlying event + // has been signaled. Successive calls to Signaled return the same value. + Signaled() <-chan struct{} + + // SignaledAt returns the time the event was signaled. If SignaledAt is + // invoked before the event is signaled nil will be returned. + SignaledAt() *time.Time +} + +type shouldExemptFunc func(*http.Request) bool + +var ( + // the health probes are not annotated by default + healthProbes = []string{ + "/readyz", + "/healthz", + "/livez", + } +) + +func exemptIfHealthProbe(r *http.Request) bool { + path := "/" + strings.TrimLeft(r.URL.Path, "/") + for _, probe := range healthProbes { + if path == probe { + return true + } + } + return false +} + +// WithShutdownResponseHeader, if added to the handler chain, adds a header +// 'X-OpenShift-Disruption' to the response with the following information: +// +// shutdown={true|false} shutdown-delay-duration=%s elapsed=%s host=%s +// shutdown: whether the server is currently shutting down gracefully. 
+// shutdown-delay-duration: value of --shutdown-delay-duration server run option +// elapsed: how much time has elapsed since the server received a TERM signal +// host: host name of the server, it is used to identify the server instance +// from the others. +// +// This handler will add the response header only if the client opts in by +// adding the 'X-Openshift-If-Disruption' header to the request. +func WithShutdownResponseHeader(handler http.Handler, shutdownInitiated lifecycleEvent, delayDuration time.Duration, apiServerID string) http.Handler { + return withShutdownResponseHeader(handler, shutdownInitiated, delayDuration, apiServerID, clockutils.RealClock{}) +} + +// WithStartupEarlyAnnotation annotates the request with an annotation keyed as +// 'apiserver.k8s.io/startup' if the request arrives early (the server is not +// fully initialized yet). It should be placed after (in order of execution) +// the 'WithAuthentication' filter. +func WithStartupEarlyAnnotation(handler http.Handler, hasBeenReady lifecycleEvent) http.Handler { + return withStartupEarlyAnnotation(handler, hasBeenReady, exemptIfHealthProbe) +} + +func withShutdownResponseHeader(handler http.Handler, shutdownInitiated lifecycleEvent, delayDuration time.Duration, apiServerID string, clock clockutils.PassiveClock) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if len(req.Header.Get("X-Openshift-If-Disruption")) == 0 { + handler.ServeHTTP(w, req) + return + } + + msgFn := func(shutdown bool, elapsed time.Duration) string { + return fmt.Sprintf("shutdown=%t shutdown-delay-duration=%s elapsed=%s host=%s", + shutdown, delayDuration.Round(time.Second).String(), elapsed.Round(time.Second).String(), apiServerID) + } + + select { + case <-shutdownInitiated.Signaled(): + default: + w.Header().Set("X-OpenShift-Disruption", msgFn(false, time.Duration(0))) + handler.ServeHTTP(w, req) + return + } + + shutdownInitiatedAt := shutdownInitiated.SignaledAt() + if shutdownInitiatedAt == nil { + w.Header().Set("X-OpenShift-Disruption", msgFn(true, time.Duration(0))) + handler.ServeHTTP(w, req) + return + } + + w.Header().Set("X-OpenShift-Disruption", msgFn(true, clock.Since(*shutdownInitiatedAt))) + handler.ServeHTTP(w, req) + }) +} + +func withStartupEarlyAnnotation(handler http.Handler, hasBeenReady lifecycleEvent, shouldExemptFn shouldExemptFunc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + select { + case <-hasBeenReady.Signaled(): + handler.ServeHTTP(w, req) + return + default: + } + + // NOTE: some upstream unit tests have authentication disabled and will + // fail if we require the requestor to be present in the request + // context. Fixing those unit tests will increase the chance of merge + // conflict during rebase. + // This also implies that this filter must be placed after (in order of + // execution) the 'WithAuthentication' filter. 
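// The audit annotation written just below has the shape
// "early=true self=<empty|false> loopback=<true|false>"; 'self' stays empty
// when no requestor can be resolved from the request context, matching the
// expectations encoded in the unit tests further down.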
+ self := "self=" + if requestor, exists := request.UserFrom(req.Context()); exists && requestor != nil { + if requestor.GetName() == user.APIServerUser { + handler.ServeHTTP(w, req) + return + } + self = fmt.Sprintf("%s%t", self, false) + } + + audit.AddAuditAnnotation(req.Context(), "apiserver.k8s.io/startup", + fmt.Sprintf("early=true %s loopback=%t", self, isLoopback(req.RemoteAddr))) + + handler.ServeHTTP(w, req) + }) +} + +func isLoopback(address string) bool { + host, _, err := net.SplitHostPort(address) + if err != nil { + // if the address is missing a port, SplitHostPort will return an error + // with an empty host, and port value. For such an error, we should + // continue and try to parse the original address. + host = address + } + if ip := netutils.ParseIPSloppy(host); ip != nil { + return ip.IsLoopback() + } + + return false +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/with_early_late_annotations_test.go b/staging/src/k8s.io/apiserver/pkg/server/filters/with_early_late_annotations_test.go new file mode 100644 index 0000000000000..152a5c377dea3 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/with_early_late_annotations_test.go @@ -0,0 +1,384 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filters + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" + authenticationuser "k8s.io/apiserver/pkg/authentication/user" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + utilsclock "k8s.io/utils/clock" + clocktesting "k8s.io/utils/clock/testing" +) + +func TestWithShutdownResponseHeader(t *testing.T) { + var ( + signaledAt = time.Now() + elapsedAt = signaledAt.Add(20 * time.Second) + ) + + tests := []struct { + name string + optIn bool + shutdownInitiated func() lifecycleEvent + delayDuration time.Duration + clock func() utilsclock.PassiveClock + handlerInvoked int + statusCodeExpected int + responseHeader string + }{ + { + name: "client did not opt in", + shutdownInitiated: func() lifecycleEvent { + return nil + }, + handlerInvoked: 1, + statusCodeExpected: http.StatusOK, + }, + { + name: "client opted in, shutdown not initiated", + optIn: true, + shutdownInitiated: func() lifecycleEvent { + return fakeLifecycleSignal{ch: make(chan struct{})} + }, + delayDuration: 10 * time.Second, + handlerInvoked: 1, + statusCodeExpected: http.StatusOK, + responseHeader: "shutdown=false shutdown-delay-duration=10s elapsed=0s host=foo", + }, + { + name: "client opted in, shutdown initiated, signaled at is nil", + optIn: true, + delayDuration: 10 * time.Second, + shutdownInitiated: func() lifecycleEvent { + return fakeLifecycleSignal{ch: newClosedChannel(), at: nil} + }, + handlerInvoked: 1, + statusCodeExpected: http.StatusOK, + responseHeader: "shutdown=true shutdown-delay-duration=10s elapsed=0s host=foo", + }, + { + name: "client opted in, shutdown delay duration is zero", + optIn: true, + delayDuration: 0, + shutdownInitiated: func() lifecycleEvent { + return fakeLifecycleSignal{ch: newClosedChannel(), at: &signaledAt} + }, + clock: func() utilsclock.PassiveClock { + return clocktesting.NewFakeClock(elapsedAt) + }, + handlerInvoked: 1, + statusCodeExpected: http.StatusOK, + responseHeader: "shutdown=true shutdown-delay-duration=0s elapsed=20s host=foo", + }, + { + name: "client opted in, shutdown initiated, signaled at is valid", + optIn: true, + delayDuration: 10 * time.Second, + shutdownInitiated: func() lifecycleEvent { + return fakeLifecycleSignal{ch: newClosedChannel(), at: &signaledAt} + }, + clock: func() utilsclock.PassiveClock { + return clocktesting.NewFakeClock(elapsedAt) + }, + handlerInvoked: 1, + statusCodeExpected: http.StatusOK, + responseHeader: "shutdown=true shutdown-delay-duration=10s elapsed=20s host=foo", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var handlerInvoked int + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + handlerInvoked++ + w.WriteHeader(http.StatusOK) + }) + + event := test.shutdownInitiated() + var clock utilsclock.PassiveClock = utilsclock.RealClock{} + if test.clock != nil { + clock = test.clock() + } + target := withShutdownResponseHeader(handler, event, test.delayDuration, "foo", clock) + + req, err := http.NewRequest(http.MethodGet, "/api/v1/namespaces", nil) +
if err != nil { + t.Fatalf("failed to create new http request - %v", err) + } + if test.optIn { + req.Header.Set("X-Openshift-If-Disruption", "true") + } + + w := httptest.NewRecorder() + w.Code = 0 + target.ServeHTTP(w, req) + + if test.handlerInvoked != handlerInvoked { + t.Errorf("expected the handler to be invoked: %d times, but got: %d", test.handlerInvoked, handlerInvoked) + } + if test.statusCodeExpected != w.Result().StatusCode { + t.Errorf("expected status code: %d, but got: %d", test.statusCodeExpected, w.Result().StatusCode) + } + + key := "X-OpenShift-Disruption" + switch { + case len(test.responseHeader) == 0: + if valueGot := w.Header().Get(key); len(valueGot) > 0 { + t.Errorf("did not expect header to be added to the response, but got: %s", valueGot) + } + default: + if valueGot := w.Header().Get(key); len(valueGot) == 0 || test.responseHeader != valueGot { + t.Logf("got: %s", valueGot) + t.Errorf("expected response header to match, diff: %s", cmp.Diff(test.responseHeader, valueGot)) + } + } + }) + } +} + +func TestWithStartupEarlyAnnotation(t *testing.T) { + tests := []struct { + name string + readySignalFn func() lifecycleEvent + user authenticationuser.Info + remoteAddr string + handlerInvoked int + statusCodeExpected int + annotationExpected string + }{ + { + name: "server is ready", + readySignalFn: func() lifecycleEvent { + return fakeLifecycleSignal{ch: newClosedChannel()} + }, + handlerInvoked: 1, + statusCodeExpected: http.StatusOK, + }, + { + name: "server not ready, no user in request context", + readySignalFn: func() lifecycleEvent { + return fakeLifecycleSignal{ch: make(chan struct{})} + }, + handlerInvoked: 1, + statusCodeExpected: http.StatusOK, + annotationExpected: "early=true self= loopback=false", + }, + { + name: "server not ready, self is true, not annotated", + readySignalFn: func() lifecycleEvent { + return fakeLifecycleSignal{ch: make(chan struct{})} + }, + user: &authenticationuser.DefaultInfo{Name: authenticationuser.APIServerUser}, + handlerInvoked: 1, + statusCodeExpected: http.StatusOK, + }, + { + name: "server not ready, self is false, request is annotated", + readySignalFn: func() lifecycleEvent { + return fakeLifecycleSignal{ch: make(chan struct{})} + }, + user: &authenticationuser.DefaultInfo{Name: authenticationuser.Anonymous}, + handlerInvoked: 1, + statusCodeExpected: http.StatusOK, + annotationExpected: "early=true self=false loopback=false", + }, + { + name: "server not ready, self is false, loopback is true, request is annotated", + readySignalFn: func() lifecycleEvent { + return fakeLifecycleSignal{ch: make(chan struct{})} + }, + user: &authenticationuser.DefaultInfo{Name: authenticationuser.Anonymous}, + remoteAddr: "127.0.0.1:8080", + handlerInvoked: 1, + statusCodeExpected: http.StatusOK, + annotationExpected: "early=true self=false loopback=true", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var handlerInvoked int + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + handlerInvoked++ + w.WriteHeader(http.StatusOK) + }) + + event := test.readySignalFn() + target := WithStartupEarlyAnnotation(handler, event) + + req, err := http.NewRequest(http.MethodGet, "/api/v1/namespaces", nil) + if err != nil { + t.Fatalf("failed to create new http request - %v", err) + } + if test.remoteAddr != "" { + req.RemoteAddr = test.remoteAddr + } + + ctx := req.Context() + if test.user != nil { + ctx = apirequest.WithUser(ctx, test.user) + } + ctx = audit.WithAuditContext(ctx) + req = req.WithContext(ctx) + + ac := audit.AuditContextFrom(req.Context()) + if ac == nil { + t.Fatalf("expected audit context inside the request context") + } + ac.Event = auditinternal.Event{ + Level: auditinternal.LevelMetadata, + } + + w := httptest.NewRecorder() + w.Code = 0 + target.ServeHTTP(w, req) + + if test.handlerInvoked != handlerInvoked { + t.Errorf("expected the handler to be invoked: %d times, but got: %d", test.handlerInvoked, handlerInvoked) + } + if test.statusCodeExpected != w.Result().StatusCode { + t.Errorf("expected status code: %d, but got: %d", test.statusCodeExpected, w.Result().StatusCode) + } + + key := "apiserver.k8s.io/startup" + switch { + case len(test.annotationExpected) == 0: + if valueGot, ok := ac.Event.Annotations[key]; ok { + t.Errorf("did not expect annotation to be added, but got: %s", valueGot) + } + default: + if valueGot, ok := ac.Event.Annotations[key]; !ok || test.annotationExpected != valueGot { + t.Errorf("expected annotation: %s, but got: %s", test.annotationExpected, valueGot) + } + } + }) + } +} + +func TestIsLoopback(t *testing.T) { + tests := []struct { + address string + want bool + }{ + { + address: "www.foo.bar:80", + want: false, + }, + { + address: "www.foo.bar", + want: false, + }, + { + address: "127.0.0.1:8080", + want: true, + }, + { + address: "127.0.0.1", + want: true, + }, + { + address: "192.168.0.1", + want: false, + }, + // hostnames such as localhost are not parsed as IPs, so they are not loopback + { + address: "localhost:8080", + want: false, + }, + { + address: "localhost", + want: false, + }, + } + + for _, test := range tests { + t.Run(test.address, func(t *testing.T) { + if got := isLoopback(test.address); test.want != got { + t.Errorf("expected isLoopback to return: %t, but got: %t", test.want, got) + } + }) + } +} + +func TestExemptIfHealthProbe(t *testing.T) { + tests := []struct { + path string + exempt bool + }{ + { + path: "/apis/v1/foo/bar", + exempt: false, + }, + { + path: "/readyz", + exempt: true, + }, + { + path: "http://foo.bar///healthz?verbose=1", + exempt: true, + }, + { + path: "/livez", + exempt: true, + }, + } + + for _, test := range tests { + t.Run(test.path, func(t *testing.T) { + req, err := http.NewRequest(http.MethodGet, test.path, nil) + if err != nil { + t.Fatalf("failed to create new http request - %v", err) + } + if got := exemptIfHealthProbe(req); test.exempt != got { + t.Errorf("expected exemptIfHealthProbe to return: %t, but got: %t", test.exempt, got) + } + }) + } +} + +type fakeLifecycleSignal struct { + ch <-chan struct{} + at *time.Time +} + +func (s fakeLifecycleSignal) Name() string { return "initiated" } +func (s fakeLifecycleSignal) Signaled() <-chan struct{} { return s.ch } +func (s fakeLifecycleSignal) SignaledAt() *time.Time { return s.at } + +func newClosedChannel() <-chan struct{} { + ch := make(chan struct{}) + close(ch) + return ch +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/with_not_ready_patch.go b/staging/src/k8s.io/apiserver/pkg/server/filters/with_not_ready_patch.go new file mode 100644 index 0000000000000..0915d8c661125 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/with_not_ready_patch.go @@ -0,0 +1,92 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "errors" + "net/http" + + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/warning" +) + +const ( + // notReadyDebuggerGroup facilitates debugging if the apiserver takes longer + // to initialize. All request(s) from this designated group will be allowed + // while the apiserver is being initialized. + // The apiserver will reject all incoming requests with a 'Retry-After' + // response header until it has fully initialized, except for + // requests from this special debugger group. + notReadyDebuggerGroup = "system:openshift:risky-not-ready-microshift-debugging-group" +) + +// WithNotReady rejects any incoming new request(s) with a 'Retry-After' +// response if the specified hasBeenReadyCh channel is still open, with +// the following exceptions: +// - all request(s) from the designated debugger group are exempt; this +// helps debug the apiserver if it takes longer to initialize. +// - local loopback requests (this exempts system:apiserver) +// - /healthz, /livez, /readyz, /metrics are exempt +// +// It includes new request(s) on a new or an existing TCP connection. +// Any new request(s) arriving before hasBeenReadyCh is closed +// are replied to with a 503 and the following response header: +// - 'Retry-After: N' (so the client can retry after N seconds) +func WithNotReady(handler http.Handler, hasBeenReadyCh <-chan struct{}) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + select { + case <-hasBeenReadyCh: + handler.ServeHTTP(w, req) + return + default: + } + + requestor, exists := request.UserFrom(req.Context()) + if !exists { + responsewriters.InternalError(w, req, errors.New("no user found for request")) + return + } + + // make sure we exempt: + // - local loopback requests (this exempts system:apiserver) + // - health probes and metric scraping + // - requests from the exempt debugger group.
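// A rejected exchange looks like this (hypothetical request, assuming an
// authenticated client that matches none of the exemptions above):
//
//	GET /api/v1/namespaces HTTP/1.1
//
//	HTTP/1.1 503 Service Unavailable
//	Retry-After: 5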
+ if requestor.GetName() == user.APIServerUser || + hasExemptPathPrefix(req) || + matchesDebuggerGroup(requestor, notReadyDebuggerGroup) { + warning.AddWarning(req.Context(), "", "The apiserver was still initializing, while this request was being served") + handler.ServeHTTP(w, req) + return + } + + // Return a 503 status asking the client to try again after 5 seconds + w.Header().Set("Retry-After", "5") + http.Error(w, "The apiserver hasn't been fully initialized yet, please try again later.", + http.StatusServiceUnavailable) + }) +} + +func matchesDebuggerGroup(requestor user.Info, debugger string) bool { + for _, group := range requestor.GetGroups() { + if group == debugger { + return true + } + } + return false +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/with_not_ready_patch_test.go b/staging/src/k8s.io/apiserver/pkg/server/filters/with_not_ready_patch_test.go new file mode 100644 index 0000000000000..f5d1b5c611139 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/with_not_ready_patch_test.go @@ -0,0 +1,143 @@ +package filters + +import ( + "net/http" + "net/http/httptest" + "testing" + + "k8s.io/apiserver/pkg/authentication/user" + genericapifilters "k8s.io/apiserver/pkg/endpoints/filters" + "k8s.io/apiserver/pkg/endpoints/request" +) + +func TestWithNotReady(t *testing.T) { + const warning = `299 - "The apiserver was still initializing, while this request was being served"` + + tests := []struct { + name string + requestURL string + hasBeenReady bool + user *user.DefaultInfo + handlerInvoked int + retryAfterExpected string + warningExpected string + statusCodeexpected int + }{ + { + name: "the apiserver is fully initialized", + hasBeenReady: true, + handlerInvoked: 1, + statusCodeexpected: http.StatusOK, + }, + { + name: "the apiserver is initializing, local loopback", + hasBeenReady: false, + user: &user.DefaultInfo{Name: user.APIServerUser}, + handlerInvoked: 1, + statusCodeexpected: http.StatusOK, + warningExpected: warning, + }, + { + name: "the apiserver is initializing, exempt debugger group", + hasBeenReady: false, + user: &user.DefaultInfo{Groups: []string{"system:authenticated", notReadyDebuggerGroup}}, + handlerInvoked: 1, + statusCodeexpected: http.StatusOK, + warningExpected: warning, + }, + { + name: "the apiserver is initializing, readyz", + requestURL: "/readyz?verbose=1", + user: &user.DefaultInfo{}, + hasBeenReady: false, + handlerInvoked: 1, + statusCodeexpected: http.StatusOK, + warningExpected: warning, + }, + { + name: "the apiserver is initializing, healthz", + requestURL: "/healthz?verbose=1", + user: &user.DefaultInfo{}, + hasBeenReady: false, + handlerInvoked: 1, + statusCodeexpected: http.StatusOK, + warningExpected: warning, + }, + { + name: "the apiserver is initializing, livez", + requestURL: "/livez?verbose=1", + user: &user.DefaultInfo{}, + hasBeenReady: false, + handlerInvoked: 1, + statusCodeexpected: http.StatusOK, + warningExpected: warning, + }, + { + name: "the apiserver is initializing, metrics", + requestURL: "/metrics", + user: &user.DefaultInfo{}, + hasBeenReady: false, + handlerInvoked: 1, + statusCodeexpected: http.StatusOK, + warningExpected: warning, + }, + { + name: "the apiserver is initializing, non-exempt request", + hasBeenReady: false, + user: &user.DefaultInfo{Groups: []string{"system:authenticated", "system:masters"}}, + statusCodeexpected: http.StatusServiceUnavailable, + retryAfterExpected: "5", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + hasBeenReadyCh 
:= make(chan struct{}) + if test.hasBeenReady { + close(hasBeenReadyCh) + } else { + defer close(hasBeenReadyCh) + } + + var handlerInvoked int + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + handlerInvoked++ + w.WriteHeader(http.StatusOK) + }) + + if len(test.requestURL) == 0 { + test.requestURL = "/api/v1/namespaces" + } + req, err := http.NewRequest(http.MethodGet, test.requestURL, nil) + if err != nil { + t.Fatalf("failed to create new http request - %v", err) + } + if test.user != nil { + req = req.WithContext(request.WithUser(req.Context(), test.user)) + } + w := httptest.NewRecorder() + + withNotReady := WithNotReady(handler, hasBeenReadyCh) + withNotReady = genericapifilters.WithWarningRecorder(withNotReady) + withNotReady.ServeHTTP(w, req) + + if test.handlerInvoked != handlerInvoked { + t.Errorf("expected the handler to be invoked: %d times, but got: %d", test.handlerInvoked, handlerInvoked) + } + if test.statusCodeexpected != w.Code { + t.Errorf("expected Response Status Code: %d, but got: %d", test.statusCodeexpected, w.Code) + } + + retryAfterGot := w.Header().Get("Retry-After") + if test.retryAfterExpected != retryAfterGot { + t.Errorf("expected Retry-After: %q, but got: %q", test.retryAfterExpected, retryAfterGot) + } + + warningGot := w.Header().Get("Warning") + if test.warningExpected != warningGot { + t.Errorf("expected Warning: %s, but got: %s", test.warningExpected, warningGot) + } + + }) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/wrap.go b/staging/src/k8s.io/apiserver/pkg/server/filters/wrap.go index 73ce270260498..1e48cd5b47420 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/wrap.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/wrap.go @@ -59,8 +59,8 @@ func WithPanicRecovery(handler http.Handler, resolver request.RequestInfoResolve } // WithHTTPLogging enables logging of incoming requests. -func WithHTTPLogging(handler http.Handler) http.Handler { - return httplog.WithLogging(handler, httplog.DefaultStacktracePred) +func WithHTTPLogging(handler http.Handler, isTerminating func() bool) http.Handler { + return httplog.WithLogging(handler, httplog.DefaultStacktracePred, isTerminating) } func withPanicRecovery(handler http.Handler, crashHandler func(http.ResponseWriter, *http.Request, interface{})) http.Handler { diff --git a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go index e810a46087950..8a98cbf020182 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -30,6 +30,7 @@ import ( "golang.org/x/time/rate" apidiscoveryv2 "k8s.io/api/apidiscovery/v2" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -285,6 +286,9 @@ type GenericAPIServer struct { // This grace period is orthogonal to other grace periods, and // it is not overridden by any other grace period. 
ShutdownWatchTerminationGracePeriod time.Duration + + // OpenShift patch + OpenShiftGenericAPIServerPatch } // DelegationTarget is an interface which allows for composition of API servers with top level handling that works @@ -537,7 +541,10 @@ func (s preparedGenericAPIServer) RunWithContext(ctx context.Context) error { go func() { defer delayedStopCh.Signal() - defer klog.V(1).InfoS("[graceful-termination] shutdown event", "name", delayedStopCh.Name()) + defer func() { + klog.V(1).InfoS("[graceful-termination] shutdown event", "name", delayedStopCh.Name()) + s.Eventf(corev1.EventTypeNormal, delayedStopCh.Name(), "The minimal shutdown duration of %v finished", s.ShutdownDelayDuration) + }() <-stopCh @@ -546,10 +553,28 @@ func (s preparedGenericAPIServer) RunWithContext(ctx context.Context) error { // and stop sending traffic to this server. shutdownInitiatedCh.Signal() klog.V(1).InfoS("[graceful-termination] shutdown event", "name", shutdownInitiatedCh.Name()) + s.Eventf(corev1.EventTypeNormal, shutdownInitiatedCh.Name(), "Received signal to terminate, becoming unready, but keeping serving") time.Sleep(s.ShutdownDelayDuration) }() + lateStopCh := make(chan struct{}) + if s.ShutdownDelayDuration > 0 { + go func() { + defer close(lateStopCh) + + <-stopCh + + time.Sleep(s.ShutdownDelayDuration * 8 / 10) + }() + } + + s.SecureServingInfo.Listener = &terminationLoggingListener{ + Listener: s.SecureServingInfo.Listener, + lateStopCh: lateStopCh, + } + unexpectedRequestsEventf.Store(s.Eventf) + // close socket after delayed stopCh shutdownTimeout := s.ShutdownTimeout if s.ShutdownSendRetryAfter { @@ -598,13 +623,17 @@ func (s preparedGenericAPIServer) RunWithContext(ctx context.Context) error { <-listenerStoppedCh httpServerStoppedListeningCh.Signal() klog.V(1).InfoS("[graceful-termination] shutdown event", "name", httpServerStoppedListeningCh.Name()) + s.Eventf(corev1.EventTypeNormal, httpServerStoppedListeningCh.Name(), "HTTP Server has stopped listening") }() // we don't accept new request as soon as both ShutdownDelayDuration has // elapsed and preshutdown hooks have completed. 
preShutdownHooksHasStoppedCh := s.lifecycleSignals.PreShutdownHooksStopped go func() { - defer klog.V(1).InfoS("[graceful-termination] shutdown event", "name", notAcceptingNewRequestCh.Name()) + defer func() { + klog.V(1).InfoS("[graceful-termination] shutdown event", "name", notAcceptingNewRequestCh.Name()) + s.Eventf(corev1.EventTypeNormal, drainedCh.Name(), "All non long-running request(s) in-flight have drained") + }() defer notAcceptingNewRequestCh.Signal() // wait for the delayed stopCh before closing the handler chain @@ -691,6 +720,7 @@ func (s preparedGenericAPIServer) RunWithContext(ctx context.Context) error { defer func() { preShutdownHooksHasStoppedCh.Signal() klog.V(1).InfoS("[graceful-termination] pre-shutdown hooks completed", "name", preShutdownHooksHasStoppedCh.Name()) + s.Eventf(corev1.EventTypeNormal, "TerminationPreShutdownHooksFinished", "All pre-shutdown hooks have been finished") }() err = s.RunPreShutdownHooks() }() @@ -711,6 +741,8 @@ func (s preparedGenericAPIServer) RunWithContext(ctx context.Context) error { <-stoppedCh klog.V(1).Info("[graceful-termination] apiserver is exiting") + s.Eventf(corev1.EventTypeNormal, "TerminationGracefulTerminationFinished", "All pending requests processed") + return nil } diff --git a/staging/src/k8s.io/apiserver/pkg/server/healthz/healthz_test.go b/staging/src/k8s.io/apiserver/pkg/server/healthz/healthz_test.go index 2e53aad30b075..31b38ff094e15 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/healthz/healthz_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/healthz/healthz_test.go @@ -255,9 +255,9 @@ func TestMetrics(t *testing.T) { expected := strings.NewReader(` # HELP apiserver_request_total [STABLE] Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, and HTTP response code. # TYPE apiserver_request_total counter - apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version=""} 1 - apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="/livez",verb="GET",version=""} 1 - apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version=""} 1 + apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="/healthz",system_client="",verb="GET",version=""} 1 + apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="/livez",system_client="",verb="GET",version=""} 1 + apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="/readyz",system_client="",verb="GET",version=""} 1 `) if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, expected, "apiserver_request_total"); err != nil { t.Error(err) diff --git a/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog.go b/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog.go index c64f5771d8ba6..299d543b1ea34 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog.go +++ b/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog.go @@ -67,6 +67,7 @@ type respLogger struct { addedInfo strings.Builder addedKeyValuePairs []interface{} startTime time.Time + isTerminating bool captureErrorOutput bool @@ -100,13 +101,13 @@ func DefaultStacktracePred(status int) bool { const withLoggingLevel = 3 // WithLogging wraps the handler with logging. 
-func WithLogging(handler http.Handler, pred StacktracePred) http.Handler { +func WithLogging(handler http.Handler, pred StacktracePred, isTerminatingFn func() bool) http.Handler { return withLogging(handler, pred, func() bool { return klog.V(withLoggingLevel).Enabled() - }) + }, isTerminatingFn) } -func withLogging(handler http.Handler, stackTracePred StacktracePred, shouldLogRequest ShouldLogRequestPred) http.Handler { +func withLogging(handler http.Handler, stackTracePred StacktracePred, shouldLogRequest ShouldLogRequestPred, isTerminatingFn func() bool) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { if !shouldLogRequest() { handler.ServeHTTP(w, req) @@ -117,14 +118,16 @@ func withLogging(handler http.Handler, stackTracePred StacktracePred, shouldLogR if old := respLoggerFromRequest(req); old != nil { panic("multiple WithLogging calls!") } - startTime := time.Now() if receivedTimestamp, ok := request.ReceivedTimestampFrom(ctx); ok { startTime = receivedTimestamp } - rl := newLoggedWithStartTime(req, w, startTime) - rl.StacktraceWhen(stackTracePred) + isTerminating := false + if isTerminatingFn != nil { + isTerminating = isTerminatingFn() + } + rl := newLoggedWithStartTime(req, w, startTime).StacktraceWhen(stackTracePred).IsTerminating(isTerminating) req = req.WithContext(context.WithValue(ctx, respLoggerContextKey, rl)) var logFunc func() @@ -135,6 +138,9 @@ func withLogging(handler http.Handler, stackTracePred StacktracePred, shouldLogR } }() + if klog.V(3).Enabled() || (rl.isTerminating && klog.V(1).Enabled()) { + defer rl.Log() + } w = responsewriter.WrapForHTTP1Or2(rl) handler.ServeHTTP(w, req) @@ -205,6 +211,12 @@ func (rl *respLogger) StacktraceWhen(pred StacktracePred) *respLogger { return rl } +// IsTerminating informs the logger that the server is terminating. +func (rl *respLogger) IsTerminating(is bool) *respLogger { + rl.isTerminating = is + return rl +} + // StatusIsNot returns a StacktracePred which will cause stacktraces to be logged // for any status *not* in the given list. 
func StatusIsNot(statuses ...int) StacktracePred { diff --git a/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog_test.go b/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog_test.go index 113825ac0b0a9..61b83c8c975eb 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog_test.go @@ -67,7 +67,7 @@ func TestWithLogging(t *testing.T) { shouldLogRequest := func() bool { return true } var handler http.Handler handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) - handler = withLogging(withLogging(handler, DefaultStacktracePred, shouldLogRequest), DefaultStacktracePred, shouldLogRequest) + handler = withLogging(withLogging(handler, DefaultStacktracePred, shouldLogRequest, nil), DefaultStacktracePred, shouldLogRequest, nil) func() { defer func() { @@ -111,7 +111,7 @@ func TestLogOf(t *testing.T) { t.Errorf("Expected %v, got %v", test.want, got) } }) - handler = withLogging(handler, DefaultStacktracePred, func() bool { return test.shouldLogRequest }) + handler = withLogging(handler, DefaultStacktracePred, func() bool { return test.shouldLogRequest }, nil) w := httptest.NewRecorder() handler.ServeHTTP(w, req) }) @@ -135,7 +135,7 @@ func TestUnlogged(t *testing.T) { } }) if makeLogger { - handler = WithLogging(handler, DefaultStacktracePred) + handler = WithLogging(handler, DefaultStacktracePred, nil) } handler.ServeHTTP(origWriter, req) @@ -216,7 +216,7 @@ func TestRespLoggerWithDecoratedResponseWriter(t *testing.T) { } }) - handler = withLogging(handler, DefaultStacktracePred, func() bool { return true }) + handler = withLogging(handler, DefaultStacktracePred, func() bool { return true }, nil) handler.ServeHTTP(test.r(), req) }) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/lifecycle_signals.go b/staging/src/k8s.io/apiserver/pkg/server/lifecycle_signals.go index e71de4d87cdbb..7d14e207c0f7a 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/lifecycle_signals.go +++ b/staging/src/k8s.io/apiserver/pkg/server/lifecycle_signals.go @@ -18,6 +18,10 @@ package server import ( "sync" + "sync/atomic" + "time" + + utilsclock "k8s.io/utils/clock" ) /* @@ -100,6 +104,10 @@ type lifecycleSignal interface { // Name returns the name of the signal, useful for logging. Name() string + + // SignaledAt returns the time the event was signaled. If SignaledAt is + // invoked before the event is signaled nil will be returned. 
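// A minimal consumer sketch (illustrative, not part of the patch): wait for
// the signal, then read the timestamp, which is guaranteed non-nil by then:
//
//	<-sig.Signaled()
//	if at := sig.SignaledAt(); at != nil {
//		klog.V(1).InfoS("signal fired", "elapsed", time.Since(*at))
//	}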
+ SignaledAt() *time.Time } // lifecycleSignals provides an abstraction of the events that @@ -157,23 +165,25 @@ func (s lifecycleSignals) ShuttingDown() <-chan struct{} { // newLifecycleSignals returns an instance of lifecycleSignals interface to be used // to coordinate lifecycle of the apiserver func newLifecycleSignals() lifecycleSignals { + clock := utilsclock.RealClock{} return lifecycleSignals{ - ShutdownInitiated: newNamedChannelWrapper("ShutdownInitiated"), - AfterShutdownDelayDuration: newNamedChannelWrapper("AfterShutdownDelayDuration"), - PreShutdownHooksStopped: newNamedChannelWrapper("PreShutdownHooksStopped"), - NotAcceptingNewRequest: newNamedChannelWrapper("NotAcceptingNewRequest"), - InFlightRequestsDrained: newNamedChannelWrapper("InFlightRequestsDrained"), - HTTPServerStoppedListening: newNamedChannelWrapper("HTTPServerStoppedListening"), - HasBeenReady: newNamedChannelWrapper("HasBeenReady"), - MuxAndDiscoveryComplete: newNamedChannelWrapper("MuxAndDiscoveryComplete"), + ShutdownInitiated: newNamedChannelWrapper("ShutdownInitiated", clock), + AfterShutdownDelayDuration: newNamedChannelWrapper("AfterShutdownDelayDuration", clock), + PreShutdownHooksStopped: newNamedChannelWrapper("PreShutdownHooksStopped", clock), + NotAcceptingNewRequest: newNamedChannelWrapper("NotAcceptingNewRequest", clock), + InFlightRequestsDrained: newNamedChannelWrapper("InFlightRequestsDrained", clock), + HTTPServerStoppedListening: newNamedChannelWrapper("HTTPServerStoppedListening", clock), + HasBeenReady: newNamedChannelWrapper("HasBeenReady", clock), + MuxAndDiscoveryComplete: newNamedChannelWrapper("MuxAndDiscoveryComplete", clock), } } -func newNamedChannelWrapper(name string) lifecycleSignal { +func newNamedChannelWrapper(name string, clock utilsclock.PassiveClock) lifecycleSignal { return &namedChannelWrapper{ - name: name, - once: sync.Once{}, - ch: make(chan struct{}), + name: name, + once: sync.Once{}, + ch: make(chan struct{}), + clock: clock, } } @@ -181,10 +191,27 @@ type namedChannelWrapper struct { name string once sync.Once ch chan struct{} + + clock utilsclock.PassiveClock + signaledAt atomic.Value } func (e *namedChannelWrapper) Signal() { e.once.Do(func() { + // set the signaledAt value first to support the expected use case: + // + // <-s.Signaled() + // .. + // at := s.SignaledAt() + // + // we guarantee that 'at' will never be nil after the event is signaled. + // It also implies that 'SignaledAt', if used independently outside of + // the above use case, may return a valid non-empty time (due to + // the delay between setting signaledAt and closing the channel) + // even when the event has not been signaled yet. + now := e.clock.Now() + e.signaledAt.Store(&now) + close(e.ch) }) } @@ -196,3 +223,11 @@ func (e *namedChannelWrapper) Signaled() <-chan struct{} { return e.ch } func (e *namedChannelWrapper) Name() string { return e.name } + +func (e *namedChannelWrapper) SignaledAt() *time.Time { + value := e.signaledAt.Load() + if value == nil { + return nil + } + return value.(*time.Time) +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/lifecycle_signals_test.go b/staging/src/k8s.io/apiserver/pkg/server/lifecycle_signals_test.go new file mode 100644 index 0000000000000..afc734f477c2f --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/lifecycle_signals_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "testing" + "time" + + clocktesting "k8s.io/utils/clock/testing" +) + +func TestLifecycleSignal(t *testing.T) { + signalName := "mysignal" + signaledAt := time.Now() + clock := clocktesting.NewFakeClock(signaledAt) + s := newNamedChannelWrapper(signalName, clock) + + if s.Name() != signalName { + t.Errorf("expected signal name to match: %q, but got: %q", signalName, s.Name()) + } + if at := s.SignaledAt(); at != nil { + t.Errorf("expected SignaledAt to return nil, but got: %v", *at) + } + select { + case <-s.Signaled(): + t.Errorf("expected the lifecycle event to not be signaled initially") + default: + } + + s.Signal() + + if at := s.SignaledAt(); at == nil || !at.Equal(signaledAt) { + t.Errorf("expected SignaledAt to return %v, but got: %v", signaledAt, at) + } + select { + case <-s.Signaled(): + default: + t.Errorf("expected the lifecycle event to be signaled") + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go index 6b4669e450637..f10e5ed5f380a 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go @@ -154,7 +154,7 @@ func (a *AdmissionOptions) ApplyTo( discoveryClient := cacheddiscovery.NewMemCacheClient(kubeClient.Discovery()) discoveryRESTMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) genericInitializer := initializer.New(kubeClient, dynamicClient, informers, c.Authorization.Authorizer, features, - c.DrainedNotify(), discoveryRESTMapper) + c.DrainedNotify(), NewAdmissionRESTMapper(discoveryRESTMapper)) initializersChain := admission.PluginInitializers{genericInitializer} initializersChain = append(initializersChain, pluginInitializers...) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go b/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go index 9b2dcb3fff7ac..d726201fd5033 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/pflag" + "github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/authorization/authorizerfactory" @@ -181,6 +182,9 @@ func (s *DelegatingAuthorizationOptions) toAuthorizer(client kubernetes.Interfac authorizers = append(authorizers, authorizerfactory.NewPrivilegedGroups(s.AlwaysAllowGroups...)) } + // add an authorizer to always approve the openshift metrics scraper.
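// (NewHardCodedMetricsAuthorizer comes from openshift/library-go; going by
// its name, it hardcodes an allow decision for the metrics-scraping identity
// only and leaves every other request to the remaining authorizers below.)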
+ authorizers = append(authorizers, hardcodedauthorizer.NewHardCodedMetricsAuthorizer()) + if len(s.AlwaysAllowPaths) > 0 { a, err := path.NewAuthorizer(s.AlwaysAllowPaths) if err != nil { @@ -239,5 +243,10 @@ func (s *DelegatingAuthorizationOptions) getClient() (kubernetes.Interface, erro clientConfig.Wrap(s.CustomRoundTripperFn) } - return kubernetes.NewForConfig(clientConfig) + // make the client use protobuf + protoConfig := rest.CopyConfig(clientConfig) + protoConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + protoConfig.ContentType = "application/vnd.kubernetes.protobuf" + + return kubernetes.NewForConfig(protoConfig) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/patch_restmapper.go b/staging/src/k8s.io/apiserver/pkg/server/options/patch_restmapper.go new file mode 100644 index 0000000000000..bce6453ee5fc2 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/options/patch_restmapper.go @@ -0,0 +1,11 @@ +package options + +import ( + "k8s.io/apimachinery/pkg/api/meta" + + "github.com/openshift/library-go/pkg/client/openshiftrestmapper" +) + +func NewAdmissionRESTMapper(delegate meta.RESTMapper) meta.RESTMapper { + return openshiftrestmapper.NewOpenShiftHardcodedRESTMapper(delegate) +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go b/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go index a4d31ef926e6f..ce2834924de70 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -98,6 +98,18 @@ type ServerRunOptions struct { ComponentGlobalsRegistry featuregate.ComponentGlobalsRegistry // ComponentName is name under which the server's global variabled are registered in the ComponentGlobalsRegistry. ComponentName string + + // SendRetryAfterWhileNotReadyOnce, if enabled, the apiserver will + // reject all incoming requests with a 503 status code and a + // 'Retry-After' response header until the apiserver has fully + // initialized, except for requests from a designated debugger group. + // This option ensures that the system stays consistent even when + // requests are received before the server has been initialized. + // In particular, it prevents child deletion in case of GC or/and + // orphaned content in case of the namespaces controller. + // NOTE: this option is applicable to Microshift only, + // this should never be enabled for OCP. 
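// A hypothetical MicroShift invocation would enable this via the flag
// registered in AddUniversalFlags further down:
//
//	kube-apiserver ... --send-retry-after-while-not-ready-once=true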
+ SendRetryAfterWhileNotReadyOnce bool } func NewServerRunOptions() *ServerRunOptions { @@ -126,6 +138,7 @@ func NewServerRunOptionsForComponent(componentName string, componentGlobalsRegis ShutdownSendRetryAfter: false, ComponentName: componentName, ComponentGlobalsRegistry: componentGlobalsRegistry, + SendRetryAfterWhileNotReadyOnce: false, } } @@ -152,6 +165,7 @@ func (s *ServerRunOptions) ApplyTo(c *server.Config) error { c.ShutdownWatchTerminationGracePeriod = s.ShutdownWatchTerminationGracePeriod c.EffectiveVersion = s.ComponentGlobalsRegistry.EffectiveVersionFor(s.ComponentName) c.FeatureGate = s.ComponentGlobalsRegistry.FeatureGateFor(s.ComponentName) + c.SendRetryAfterWhileNotReadyOnce = s.SendRetryAfterWhileNotReadyOnce return nil } @@ -375,6 +389,13 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { "This option, if set, represents the maximum amount of grace period the apiserver will wait "+ "for active watch request(s) to drain during the graceful server shutdown window.") + // NOTE: this option is applicable to Microshift only; it should never be enabled for OCP. + fs.BoolVar(&s.SendRetryAfterWhileNotReadyOnce, "send-retry-after-while-not-ready-once", s.SendRetryAfterWhileNotReadyOnce, ""+ + "If true, incoming request(s) will be rejected with a '503' status code and a 'Retry-After' response header "+ + "until the apiserver has initialized, except for requests from a certain group. This option ensures that the system stays "+ + "consistent even when requests arrive at the server before it has been initialized. "+ + "This option is applicable to Microshift only; it should never be enabled for OCP") + + s.ComponentGlobalsRegistry.AddFlags(fs) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/patch_config.go b/staging/src/k8s.io/apiserver/pkg/server/patch_config.go new file mode 100644 index 0000000000000..0324b3f5b4b77 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/patch_config.go @@ -0,0 +1,50 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +// newIsTerminatingFunc returns a 'func() bool' that relies on the +// 'ShutdownInitiated' lifecycle signal to answer whether the apiserver +// has started the termination process. +func (c *Config) newIsTerminatingFunc() func() bool { + var shutdownCh <-chan struct{} + // TODO: a properly initialized Config object should always have lifecycleSignals + // initialized, but some config unit tests leave lifecycleSignals as nil. + // Fix the unit tests upstream and then we can remove this check.
+ if c.lifecycleSignals.ShutdownInitiated != nil { + shutdownCh = c.lifecycleSignals.ShutdownInitiated.Signaled() + } + + return func() bool { + select { + case <-shutdownCh: + return true + default: + return false + } + } +} + +func (c *Config) newServerFullyInitializedFunc() func() bool { + return func() bool { + select { + case <-c.lifecycleSignals.HasBeenReady.Signaled(): + return true + default: + return false + } + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/patch_genericapiserver.go b/staging/src/k8s.io/apiserver/pkg/server/patch_genericapiserver.go new file mode 100644 index 0000000000000..a84494b1982db --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/patch_genericapiserver.go @@ -0,0 +1,289 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "strings" + "sync" + goatomic "sync/atomic" + "time" + + "go.uber.org/atomic" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apiserver/pkg/audit" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/klog/v2" + netutils "k8s.io/utils/net" +) + +// EventSink allows creating events. +type EventSink interface { + Create(event *corev1.Event) (*corev1.Event, error) + Destroy() +} + +type OpenShiftGenericAPIServerPatch struct { + // EventSink creates events. + eventSink EventSink + eventRef *corev1.ObjectReference + + // when we emit the lifecycle events, we store the event ID of the first + // shutdown event "ShutdownInitiated" emitted so we can correlate it to + // the other shutdown events for a particular apiserver restart. + // This provides a more deterministic way to determine the shutdown + // duration for an apiserver restart. + eventLock sync.Mutex + shutdownInitiatedEventID types.UID +} + +// Eventf creates an event with the API server as source, either against the default namespace, or, +// if POD_NAME/POD_NAMESPACE are set, against that pod. +func (s *GenericAPIServer) Eventf(eventType, reason, messageFmt string, args ...interface{}) { + t := metav1.NewTime(time.Now()) + host, _ := os.Hostname() // explicitly ignore error; an empty host is fine + + ref := *s.eventRef + if len(ref.Namespace) == 0 { + ref.Namespace = "default" // TODO: event broadcaster sets event ns to default. We have to match. Odd.
+ } + + e := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: ref.Namespace, + }, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + InvolvedObject: ref, + Reason: reason, + Message: fmt.Sprintf(messageFmt, args...), + Type: eventType, + Source: corev1.EventSource{Component: "apiserver", Host: host}, + } + + func() { + s.eventLock.Lock() + defer s.eventLock.Unlock() + if len(s.shutdownInitiatedEventID) != 0 { + e.Related = &corev1.ObjectReference{ + UID: s.shutdownInitiatedEventID, + } + } + }() + + klog.V(2).Infof("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) + + ev, err := s.eventSink.Create(e) + if err != nil { + klog.Warningf("failed to create event %s/%s: %v", e.Namespace, e.Name, err) + return + } + + if ev != nil && ev.Reason == "ShutdownInitiated" { + // we have successfully created the shutdown initiated event, + // all consecutive shutdown events we are going to write for + // this restart can be tied to this initiated event + s.eventLock.Lock() + defer s.eventLock.Unlock() + if len(s.shutdownInitiatedEventID) == 0 { + s.shutdownInitiatedEventID = ev.GetUID() + } + } +} + +func eventReference() (*corev1.ObjectReference, error) { + ns := os.Getenv("POD_NAMESPACE") + pod := os.Getenv("POD_NAME") + if len(ns) == 0 && len(pod) > 0 { + serviceAccountNamespaceFile := "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + if _, err := os.Stat(serviceAccountNamespaceFile); err == nil { + bs, err := ioutil.ReadFile(serviceAccountNamespaceFile) + if err != nil { + return nil, err + } + ns = string(bs) + } + } + if len(ns) == 0 { + pod = "" + ns = "openshift-kube-apiserver" + } + if len(pod) == 0 { + return &corev1.ObjectReference{ + Kind: "Namespace", + Name: ns, + APIVersion: "v1", + }, nil + } + + return &corev1.ObjectReference{ + Kind: "Pod", + Namespace: ns, + Name: pod, + APIVersion: "v1", + }, nil +} + +// terminationLoggingListener wraps the given listener to mark late connections +// as such, identified by the remote address. In parallel, we have a filter that +// logs bad requests through these connections. We need this filter to get +// access to the http path in order to filter out healthz or readyz probes that +// are allowed at any point during termination. +// +// Connections are late after the lateStopCh has been closed. +type terminationLoggingListener struct { + net.Listener + lateStopCh <-chan struct{} +} + +type eventfFunc func(eventType, reason, messageFmt string, args ...interface{}) + +var ( + lateConnectionRemoteAddrsLock sync.RWMutex + lateConnectionRemoteAddrs = map[string]bool{} + + unexpectedRequestsEventf goatomic.Value +) + +func (l *terminationLoggingListener) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return nil, err + } + + select { + case <-l.lateStopCh: + lateConnectionRemoteAddrsLock.Lock() + defer lateConnectionRemoteAddrsLock.Unlock() + lateConnectionRemoteAddrs[c.RemoteAddr().String()] = true + default: + } + + return c, nil +} + +// WithLateConnectionFilter logs every non-probe request that comes through a late connection identified by remote address. 
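// "Late" is driven by the listener wiring in RunWithContext above: lateStopCh
// closes once 80% of ShutdownDelayDuration has elapsed after the TERM signal,
// and every connection accepted after that point has its remote address
// recorded in lateConnectionRemoteAddrs by terminationLoggingListener.Accept.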
+func WithLateConnectionFilter(handler http.Handler) http.Handler { + var lateRequestReceived atomic.Bool + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + lateConnectionRemoteAddrsLock.RLock() + late := lateConnectionRemoteAddrs[r.RemoteAddr] + lateConnectionRemoteAddrsLock.RUnlock() + + if late { + if pth := "/" + strings.TrimLeft(r.URL.Path, "/"); pth != "/readyz" && pth != "/healthz" && pth != "/livez" { + if isLocal(r) { + audit.AddAuditAnnotation(r.Context(), "openshift.io/during-graceful", fmt.Sprintf("loopback=true,%v,readyz=false", r.URL.Host)) + klog.V(4).Infof("Loopback request to %q (user agent %q) through connection created very late in the graceful termination process (more than 80%% has passed). This client probably does not watch /readyz and might get failures when termination is over.", r.URL.Path, r.UserAgent()) + } else { + audit.AddAuditAnnotation(r.Context(), "openshift.io/during-graceful", fmt.Sprintf("loopback=false,%v,readyz=false", r.URL.Host)) + klog.Warningf("Request to %q (source IP %s, user agent %q) through a connection created very late in the graceful termination process (more than 80%% has passed), possibly a sign for a broken load balancer setup.", r.URL.Path, r.RemoteAddr, r.UserAgent()) + + // create only one event to avoid event spam. + var eventf eventfFunc + eventf, _ = unexpectedRequestsEventf.Load().(eventfFunc) + if swapped := lateRequestReceived.CAS(false, true); swapped && eventf != nil { + eventf(corev1.EventTypeWarning, "LateConnections", "The apiserver received connections (e.g. from %q, user agent %q) very late in the graceful termination process, possibly a sign for a broken load balancer setup.", r.RemoteAddr, r.UserAgent()) + } + } + } + } + + handler.ServeHTTP(w, r) + }) +} + +// WithNonReadyRequestLogging rejects the request until the process has been ready once. +func WithNonReadyRequestLogging(handler http.Handler, hasBeenReadySignal lifecycleSignal) http.Handler { + if hasBeenReadySignal == nil { + return handler + } + + var nonReadyRequestReceived atomic.Bool + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + select { + case <-hasBeenReadySignal.Signaled(): + handler.ServeHTTP(w, r) + return + default: + } + + // ignore connections to local IP. Those clients better know what they are doing. + if pth := "/" + strings.TrimLeft(r.URL.Path, "/"); pth != "/readyz" && pth != "/healthz" && pth != "/livez" { + if isLocal(r) { + if !isKubeApiserverLoopBack(r) { + audit.AddAuditAnnotation(r.Context(), "openshift.io/unready", fmt.Sprintf("loopback=true,%v,readyz=false", r.URL.Host)) + klog.V(2).Infof("Loopback request to %q (user agent %q) before server is ready. This client probably does not watch /readyz and might get inconsistent answers.", r.URL.Path, r.UserAgent()) + } + } else { + audit.AddAuditAnnotation(r.Context(), "openshift.io/unready", fmt.Sprintf("loopback=false,%v,readyz=false", r.URL.Host)) + klog.Warningf("Request to %q (source IP %s, user agent %q) before server is ready, possibly a sign for a broken load balancer setup.", r.URL.Path, r.RemoteAddr, r.UserAgent()) + + // create only one event to avoid event spam. + var eventf eventfFunc + eventf, _ = unexpectedRequestsEventf.Load().(eventfFunc) + if swapped := nonReadyRequestReceived.CAS(false, true); swapped && eventf != nil { + eventf(corev1.EventTypeWarning, "NonReadyRequests", "The kube-apiserver received requests (e.g. 
from %q, user agent %q, accessing %s) before it was ready, possibly a sign for a broken load balancer setup.", r.RemoteAddr, r.UserAgent(), r.URL.Path) + } + } + } + + handler.ServeHTTP(w, r) + }) +} + +func isLocal(req *http.Request) bool { + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + // ignore error and keep going + } else if ip := netutils.ParseIPSloppy(host); ip != nil { + return ip.IsLoopback() + } + + return false +} + +func isKubeApiserverLoopBack(req *http.Request) bool { + return strings.HasPrefix(req.UserAgent(), "kube-apiserver/") +} + +type nullEventSink struct{} + +func (nullEventSink) Create(event *corev1.Event) (*corev1.Event, error) { + return nil, nil +} + +func (nullEventSink) Destroy() { +} + +type clientEventSink struct { + *v1.EventSinkImpl +} + +func (clientEventSink) Destroy() { +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/routes/openapi.go b/staging/src/k8s.io/apiserver/pkg/server/routes/openapi.go index 12c8b1ad9100b..13982b7b4cef4 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/routes/openapi.go +++ b/staging/src/k8s.io/apiserver/pkg/server/routes/openapi.go @@ -17,6 +17,8 @@ limitations under the License. package routes import ( + "strings" + restful "github.com/emicklei/go-restful/v3" "k8s.io/klog/v2" @@ -38,10 +40,35 @@ type OpenAPI struct { // Install adds the SwaggerUI webservice to the given mux. func (oa OpenAPI) InstallV2(c *restful.Container, mux *mux.PathRecorderMux) (*handler.OpenAPIService, *spec.Swagger) { + // we shadow ClusterResourceQuotas, RoleBindingRestrictions, and SecurityContextConstraints + // with a CRD. The prefixes below exclude all CRQ, RBR, SCC paths + // from the OpenAPI spec such that they don't conflict with the CRD + // apiextensions-apiserver spec during merging. + oa.Config.IgnorePrefixes = append(oa.Config.IgnorePrefixes, + "/apis/quota.openshift.io/v1/clusterresourcequotas", + "/apis/security.openshift.io/v1/securitycontextconstraints", + "/apis/authorization.openshift.io/v1/rolebindingrestrictions", + "/apis/authorization.openshift.io/v1/namespaces/{namespace}/rolebindingrestrictions", + "/apis/authorization.openshift.io/v1/watch/namespaces/{namespace}/rolebindingrestrictions", + "/apis/authorization.openshift.io/v1/watch/rolebindingrestrictions") + spec, err := builder2.BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(c.RegisteredWebServices()), oa.Config) if err != nil { klog.Fatalf("Failed to build open api spec for root: %v", err) } + + // we shadow ClusterResourceQuotas, RoleBindingRestrictions, and SecurityContextConstraints + // with a CRD. This loop removes all CRQ, RBR, SCC paths + // from the OpenAPI spec such that they don't conflict with the CRD + // apiextensions-apiserver spec during merging.
+ for pth := range spec.Paths.Paths { + if strings.HasPrefix(pth, "/apis/quota.openshift.io/v1/clusterresourcequotas") || + strings.Contains(pth, "rolebindingrestrictions") || + strings.HasPrefix(pth, "/apis/security.openshift.io/v1/securitycontextconstraints") { + delete(spec.Paths.Paths, pth) + } + } + spec.Definitions = handler.PruneDefaults(spec.Definitions) openAPIVersionedService := handler.NewOpenAPIService(spec) openAPIVersionedService.RegisterOpenAPIVersionedService("/openapi/v2", mux) diff --git a/staging/src/k8s.io/apiserver/pkg/server/signal.go b/staging/src/k8s.io/apiserver/pkg/server/signal.go index e5334ae4c15f5..bdd2728f8abf8 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/signal.go +++ b/staging/src/k8s.io/apiserver/pkg/server/signal.go @@ -20,6 +20,8 @@ import ( "context" "os" "os/signal" + + "k8s.io/klog/v2" ) var onlyOneSignalHandler = make(chan struct{}) @@ -34,10 +36,26 @@ func SetupSignalHandler() <-chan struct{} { return SetupSignalContext().Done() } +// SetupSignalHandlerIgnoringFurtherSignals is the same as SetupSignalHandler, except +// it ignores further exit signals after receiving the first one. +func SetupSignalHandlerIgnoringFurtherSignals() <-chan struct{} { + return SetupSignalContextNotExiting().Done() +} + // SetupSignalContext is same as SetupSignalHandler, but a context.Context is returned. // Only one of SetupSignalContext and SetupSignalHandler should be called, and only can // be called once. func SetupSignalContext() context.Context { + return setupSignalContext(true) +} + +// SetupSignalContextNotExiting is the same as SetupSignalContext, except +// it ignores further exit signals after receiving the first one. +func SetupSignalContextNotExiting() context.Context { + return setupSignalContext(false) +} + +func setupSignalContext(exitOnSecondSignal bool) context.Context { close(onlyOneSignalHandler) // panics when called twice shutdownHandler = make(chan os.Signal, 2) @@ -47,8 +65,15 @@ func SetupSignalContext() context.Context { go func() { <-shutdownHandler cancel() - <-shutdownHandler - os.Exit(1) // second signal. Exit directly. + if exitOnSecondSignal { + <-shutdownHandler + os.Exit(1) + } else { + for { + <-shutdownHandler + klog.Infof("Termination signal has been received already. Ignoring signal.") + } + } }() return ctx diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/etcd3retry/retry_etcdclient.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/etcd3retry/retry_etcdclient.go new file mode 100644 index 0000000000000..ea9ea06601bf9 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/etcd3retry/retry_etcdclient.go @@ -0,0 +1,201 @@ +package etcd3retry + +import ( + "context" + "time" + + etcdrpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "google.golang.org/grpc/codes" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/etcd3/metrics" + "k8s.io/klog/v2" +) + +var DefaultRetry = wait.Backoff{ + Duration: 300 * time.Millisecond, + Factor: 2, // double the timeout for every failure + Jitter: 0.1, + Steps: 6, // .3 + .6 + 1.2 + 2.4 + 4.8 = ~9.3s in total; this lets us smooth out short bumps but not long outages, and keeps retry behavior close to the previous non-retrying behavior. +} + +type retryClient struct { + // embed because we only want to override a few methods + storage.Interface +} + +// NewRetryingEtcdStorage returns a storage.Interface that delegates to the given etcd3 implementation and retries calls that fail with a retriable etcd error.
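+//
+// A minimal usage sketch (an illustration only, not part of this patch; "delegate"
+// stands for any existing storage.Interface such as the plain etcd3 store, and "pod"
+// for a suitable runtime.Object):
+//
+//	store := NewRetryingEtcdStorage(delegate)
+//	// each call is transparently retried with DefaultRetry while etcd reports codes.Unavailable
+//	err := store.Get(ctx, "/registry/pods/default/foo", storage.GetOptions{}, pod)
+//
+// OnError can likewise guard any other etcd-backed call:
+//
+//	err = OnError(ctx, DefaultRetry, IsRetriableEtcdError, func() error {
+//		return someEtcdCall(ctx) // hypothetical function
+//	})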
+func NewRetryingEtcdStorage(delegate storage.Interface) storage.Interface { + return &retryClient{Interface: delegate} +} + +// Create adds a new object at a key unless it already exists. 'ttl' is time-to-live +// in seconds (0 means forever). If no error is returned and out is not nil, out will be +// set to the read value from database. +func (c *retryClient) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { + return OnError(ctx, DefaultRetry, IsRetriableEtcdError, func() error { + return c.Interface.Create(ctx, key, obj, out, ttl) + }) +} + +// Delete removes the specified key and returns the value that existed at that spot. +// If key didn't exist, it will return NotFound storage error. +func (c *retryClient) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object, opts storage.DeleteOptions) error { + return OnError(ctx, DefaultRetry, IsRetriableEtcdError, func() error { + return c.Interface.Delete(ctx, key, out, preconditions, validateDeletion, cachedExistingObject, opts) + }) +} + +// Watch begins watching the specified key. Events are decoded into API objects, +// and any items selected by 'opts.Predicate' are sent down to the returned watch.Interface. +// resourceVersion may be used to specify what version to begin watching, +// which should be the current resourceVersion, and no longer rv+1 +// (e.g. reconnecting without missing any updates). +// If resource version is "0", this interface will get current object at given key +// and send it in an "ADDED" event, before watch starts. +func (c *retryClient) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + var ret watch.Interface + err := OnError(ctx, DefaultRetry, IsRetriableEtcdError, func() error { + var innerErr error + ret, innerErr = c.Interface.Watch(ctx, key, opts) + return innerErr + }) + return ret, err +} + +// Get unmarshals json found at key into objPtr. On a not found error, will either +// return a zero object of the requested type, or an error, depending on 'opts.ignoreNotFound'. +// Treats empty responses and nil response nodes exactly like a not found error. +// The returned contents may be delayed, but it is guaranteed that they will +// match 'opts.ResourceVersion' according to 'opts.ResourceVersionMatch'. +func (c *retryClient) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error { + return OnError(ctx, DefaultRetry, IsRetriableEtcdError, func() error { + return c.Interface.Get(ctx, key, opts, objPtr) + }) +} + +// GetList unmarshals objects found at key into a *List api object (an object +// that satisfies runtime.IsList definition). +// If 'opts.Recursive' is false, 'key' is used as an exact match. If 'opts.Recursive' +// is true, 'key' is used as a prefix. +// The returned contents may be delayed, but it is guaranteed that they will +// match 'opts.ResourceVersion' according to 'opts.ResourceVersionMatch'. +func (c *retryClient) GetList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + return OnError(ctx, DefaultRetry, IsRetriableEtcdError, func() error { + return c.Interface.GetList(ctx, key, opts, listObj) + }) +} + +// GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'destination') +// retrying the update until success if there is index conflict.
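+// (When called through the retryClient wrapper above, the entire GuaranteedUpdate
+// invocation is additionally retried on transient "Unavailable" etcd errors, on top
+// of this conflict-driven retry.)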
+// Note that object passed to tryUpdate may change across invocations of tryUpdate() if +// other writers are simultaneously updating it, so tryUpdate() needs to take into account +// the current contents of the object when deciding how the update object should look. +// If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false +// else `destination` will be set to the zero value of its type. +// If the eventual successful invocation of `tryUpdate` returns an output with the same serialized +// contents as the input, it won't perform any update, but instead set `destination` to an object with those +// contents. +// If 'cachedExistingObject' is non-nil, it can be used as a suggestion about the +// current version of the object to avoid read operation from storage to get it. +// However, the implementations have to retry in case suggestion is stale. +// +// Example: +// +// s := /* implementation of Interface */ +// err := s.GuaranteedUpdate( +// +// "myKey", &MyType{}, true, preconditions, +// func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { +// // Before each invocation of the user defined function, "input" is reset to +// // current contents for "myKey" in database. +// curr := input.(*MyType) // Guaranteed to succeed. +// +// // Make the modification +// curr.Counter++ +// +// // Return the modified object - return an error to stop iterating. Return +// // a uint64 to alter the TTL on the object, or nil to keep it the same value. +// return curr, nil, nil +// }, cachedExistingObject +// +// ) +func (c *retryClient) GuaranteedUpdate(ctx context.Context, key string, destination runtime.Object, ignoreNotFound bool, + preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, cachedExistingObject runtime.Object) error { + return OnError(ctx, DefaultRetry, IsRetriableEtcdError, func() error { + return c.Interface.GuaranteedUpdate(ctx, key, destination, ignoreNotFound, preconditions, tryUpdate, cachedExistingObject) + }) +} + +// IsRetriableEtcdError returns true if a retry should be attempted, otherwise false. +// errorLabel is set to a non-empty value that reflects the type of error encountered. +func IsRetriableEtcdError(err error) (errorLabel string, retry bool) { + if err != nil { + if etcdError, ok := etcdrpc.Error(err).(etcdrpc.EtcdError); ok { + if etcdError.Code() == codes.Unavailable { + errorLabel = "Unavailable" + retry = true + } + } + } + return +} + +// OnError allows the caller to retry fn in case the error returned by fn is retriable +// according to the provided function. backoff defines the maximum retries and the wait +// interval between two retries. +func OnError(ctx context.Context, backoff wait.Backoff, retriable func(error) (string, bool), fn func() error) error { + var lastErr error + var lastErrLabel string + var retry bool + var retryCounter int + err := backoffWithRequestContext(ctx, backoff, func() (bool, error) { + err := fn() + if retry { + klog.V(1).Infof("etcd retry - counter: %v, lastErrLabel: %s, lastError: %v, error: %v", retryCounter, lastErrLabel, lastErr, err) + metrics.UpdateEtcdRequestRetry(lastErrLabel) + } + if err == nil { + return true, nil + } + + lastErrLabel, retry = retriable(err) + if retry { + lastErr = err + retryCounter++ + return false, nil + } + + return false, err + }) + if err == wait.ErrWaitTimeout && lastErr != nil { + err = lastErr + } + return err +} + +// backoffWithRequestContext works with a request context and a Backoff.
It ensures that the retry wait never +// exceeds the deadline specified by the request context. +func backoffWithRequestContext(ctx context.Context, backoff wait.Backoff, condition wait.ConditionFunc) error { + for backoff.Steps > 0 { + if ok, err := condition(); err != nil || ok { + return err + } + + if backoff.Steps == 1 { + break + } + + waitBeforeRetry := backoff.Step() + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(waitBeforeRetry): + } + } + + return wait.ErrWaitTimeout +} diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/etcd3retry/retry_etcdclient_test.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/etcd3retry/retry_etcdclient_test.go new file mode 100644 index 0000000000000..36500831e77d3 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/etcd3retry/retry_etcdclient_test.go @@ -0,0 +1,125 @@ +package etcd3retry + +import ( + "context" + "fmt" + "net" + "net/url" + "strconv" + "syscall" + "testing" + + etcdrpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "k8s.io/apiserver/pkg/storage" +) + +func TestOnError(t *testing.T) { + tests := []struct { + name string + returnedFnError func(retryCounter int) error + expectedRetries int + expectedFinalError error + }{ + { + name: "retry ErrLeaderChanged", + returnedFnError: func(_ int) error { return etcdrpc.ErrLeaderChanged }, + expectedRetries: 5, + expectedFinalError: etcdrpc.ErrLeaderChanged, + }, + { + name: "retry ErrLeaderChanged a few times", + returnedFnError: func(retryCounter int) error { + if retryCounter == 3 { + return nil + } + return etcdrpc.ErrLeaderChanged + }, + expectedRetries: 3, + }, + { + name: "no retries", + returnedFnError: func(_ int) error { return nil }, + }, + { + name: "no retries for a random error", + returnedFnError: func(_ int) error { return fmt.Errorf("random error") }, + expectedFinalError: fmt.Errorf("random error"), + }, + } + + for _, scenario := range tests { + t.Run(scenario.name, func(t *testing.T) { + ctx := context.TODO() + // we set it to -1 to indicate that the first + // execution is not a retry + actualRetries := -1 + err := OnError(ctx, DefaultRetry, IsRetriableEtcdError, func() error { + actualRetries++ + return scenario.returnedFnError(actualRetries) + }) + + if actualRetries != scenario.expectedRetries { + t.Errorf("Unexpected number of retries %v, expected %v", actualRetries, scenario.expectedRetries) + } + if (err == nil && scenario.expectedFinalError != nil) || (err != nil && scenario.expectedFinalError == nil) { + t.Errorf("Expected error %v, got %v", scenario.expectedFinalError, err) + } + if err != nil && scenario.expectedFinalError != nil && err.Error() != scenario.expectedFinalError.Error() { + t.Errorf("Expected error %v, got %v", scenario.expectedFinalError, err) + } + }) + } +} + +func TestIsRetriableEtcdError(t *testing.T) { + tests := []struct { + name string + etcdErr error + errorLabelExpected string + retryExpected bool + }{ + { + name: "error is nil", + errorLabelExpected: "", + retryExpected: false, + }, + { + name: "generic storage error", + etcdErr: storage.NewKeyNotFoundError("key", 0), + errorLabelExpected: "", + retryExpected: false, + }, + { + name: "connection refused error", + etcdErr: &url.Error{Err: &net.OpError{Err: syscall.ECONNREFUSED}}, + errorLabelExpected: "", + retryExpected: false, + }, + { + name: "etcd unavailable error", + etcdErr: etcdrpc.ErrLeaderChanged, + errorLabelExpected: "Unavailable", + retryExpected: true, + }, + { + name: "should also inspect error message", + etcdErr: 
fmt.Errorf("etcdserver: leader changed"), + errorLabelExpected: "Unavailable", + retryExpected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + errorCodeGot, retryGot := IsRetriableEtcdError(test.etcdErr) + + if test.errorLabelExpected != errorCodeGot { + t.Errorf("expected error code: %s but got: %s", test.errorLabelExpected, errorCodeGot) + } + + if test.retryExpected != retryGot { + t.Errorf("expected retry: %s but got: %s", strconv.FormatBool(test.retryExpected), strconv.FormatBool(retryGot)) + } + }) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go index 747e120dc7490..cd61feec0ed02 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -153,6 +153,14 @@ var ( }, []string{"resource"}, ) + etcdRequestRetry = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "etcd_request_retry_total", + Help: "Total number of etcd request retries, labeled by error type", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"error"}, + ) ) var registerMetrics sync.Once @@ -175,6 +183,7 @@ func Register() { legacyregistry.MustRegister(listStorageNumSelectorEvals) legacyregistry.MustRegister(listStorageNumReturned) legacyregistry.MustRegister(decodeErrorCounts) + legacyregistry.MustRegister(etcdRequestRetry) }) } @@ -239,6 +248,11 @@ func UpdateLeaseObjectCount(count int64) { etcdLeaseObjectCounts.WithLabelValues().Observe(float64(count)) } +// UpdateEtcdRequestRetry increments the etcd_request_retry_total metric. +func UpdateEtcdRequestRetry(errorCode string) { + etcdRequestRetry.WithLabelValues(errorCode).Inc() +} + // RecordListEtcd3Metrics notes various metrics of the cost to serve a LIST request func RecordStorageListMetrics(resource string, numFetched, numEvald, numReturned int) { listStorageCount.WithLabelValues(resource).Inc() diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go index 49aeaec2bee21..5245a8eeaf4dd 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -39,6 +39,7 @@ import ( "go.uber.org/zap/zapcore" "golang.org/x/time/rate" "google.golang.org/grpc" + "k8s.io/apiserver/pkg/storage/etcd3/etcd3retry" "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/runtime" @@ -155,13 +156,13 @@ func newETCD3Check(c storagebackend.Config, timeout time.Duration, stopCh <-chan // retry in a loop in the background until we successfully create the client, storing the client or error encountered lock := sync.RWMutex{} - var prober *etcd3ProberMonitor + var prober *etcd3RetryingProberMonitor clientErr := fmt.Errorf("etcd client connection not yet established") go wait.PollImmediateUntil(time.Second, func() (bool, error) { lock.Lock() defer lock.Unlock() - newProber, err := newETCD3ProberMonitor(c) + newProber, err := newRetryingETCD3ProberMonitor(c) // Ensure that the server is not already shutting down.
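// (The retrying prober/monitor wraps the plain etcd3 prober so that Probe and Monitor calls are retried via etcd3retry.OnError on transient "Unavailable" errors; see retry_etcdprobemonitor.go introduced later in this patch.)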
select { case <-stopCh: @@ -464,8 +465,9 @@ func newETCD3Storage(c storagebackend.ConfigForResource, newFunc, newListFunc fu versioner := storage.APIObjectVersioner{} decoder := etcd3.NewDefaultDecoder(c.Codec, versioner) - store := etcd3.New(client, c.Codec, newFunc, newListFunc, c.Prefix, resourcePrefix, c.GroupResource, transformer, c.LeaseManagerConfig, decoder, versioner) + store := etcd3retry.NewRetryingEtcdStorage(etcd3.New(client, c.Codec, newFunc, newListFunc, c.Prefix, resourcePrefix, c.GroupResource, transformer, c.LeaseManagerConfig, decoder, versioner)) return store, destroyFunc, nil + } // startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go index 2bf3727e8a77d..0967a84cbe83f 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go @@ -69,7 +69,7 @@ func CreateProber(c storagebackend.Config) (Prober, error) { case storagebackend.StorageTypeETCD2: return nil, fmt.Errorf("%s is no longer a supported storage backend", c.Type) case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3: - return newETCD3ProberMonitor(c) + return newRetryingETCD3ProberMonitor(c) default: return nil, fmt.Errorf("unknown storage type: %s", c.Type) } @@ -80,7 +80,7 @@ func CreateMonitor(c storagebackend.Config) (metrics.Monitor, error) { case storagebackend.StorageTypeETCD2: return nil, fmt.Errorf("%s is no longer a supported storage backend", c.Type) case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3: - return newETCD3ProberMonitor(c) + return newRetryingETCD3ProberMonitor(c) default: return nil, fmt.Errorf("unknown storage type: %s", c.Type) } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/retry_etcdprobemonitor.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/retry_etcdprobemonitor.go new file mode 100644 index 0000000000000..ab210464f50fa --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/retry_etcdprobemonitor.go @@ -0,0 +1,46 @@ +package factory + +import ( + "context" + + "k8s.io/apiserver/pkg/storage/etcd3/etcd3retry" + "k8s.io/apiserver/pkg/storage/etcd3/metrics" + "k8s.io/apiserver/pkg/storage/storagebackend" +) + +type proberMonitor interface { + Prober + metrics.Monitor +} + +type etcd3RetryingProberMonitor struct { + delegate proberMonitor +} + +func newRetryingETCD3ProberMonitor(c storagebackend.Config) (*etcd3RetryingProberMonitor, error) { + delegate, err := newETCD3ProberMonitor(c) + if err != nil { + return nil, err + } + return &etcd3RetryingProberMonitor{delegate: delegate}, nil +} + +func (t *etcd3RetryingProberMonitor) Probe(ctx context.Context) error { + return etcd3retry.OnError(ctx, etcd3retry.DefaultRetry, etcd3retry.IsRetriableEtcdError, func() error { + return t.delegate.Probe(ctx) + }) +} + +func (t *etcd3RetryingProberMonitor) Monitor(ctx context.Context) (metrics.StorageMetrics, error) { + var ret metrics.StorageMetrics + err := etcd3retry.OnError(ctx, etcd3retry.DefaultRetry, etcd3retry.IsRetriableEtcdError, func() error { + var innerErr error + ret, innerErr = t.delegate.Monitor(ctx) + return innerErr + }) + return ret, err +} + +func (t *etcd3RetryingProberMonitor) Close() error { + return t.delegate.Close() +} diff --git 
a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/retry_etcdprobemonitor_test.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/retry_etcdprobemonitor_test.go new file mode 100644 index 0000000000000..db6819ef5f684 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/retry_etcdprobemonitor_test.go @@ -0,0 +1,147 @@ +package factory + +import ( + "context" + "fmt" + "testing" + + etcdrpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + + "k8s.io/apiserver/pkg/storage/etcd3/metrics" +) + +func getRetryScenarios() []struct { + name string + retryFnError func() error + expectedRetries int + expectedFinalError error +} { + return []struct { + name string + retryFnError func() error + expectedRetries int + expectedFinalError error + }{ + { + name: "retry ErrLeaderChanged", + retryFnError: func() error { + return etcdrpc.ErrLeaderChanged + }, + expectedRetries: 5, + expectedFinalError: etcdrpc.ErrLeaderChanged, + }, + { + name: "retry ErrLeaderChanged a few times", + retryFnError: func() func() error { + retryCounter := -1 + return func() error { + retryCounter++ + if retryCounter == 3 { + return nil + } + return etcdrpc.ErrLeaderChanged + } + }(), + expectedRetries: 3, + }, + { + name: "no retries", + retryFnError: func() error { + return nil + }, + }, + { + name: "no retries for a random error", + retryFnError: func() error { + return fmt.Errorf("random error") + }, + expectedFinalError: fmt.Errorf("random error"), + }, + } +} + +func TestEtcd3RetryingProber(t *testing.T) { + for _, scenario := range getRetryScenarios() { + t.Run(scenario.name, func(t *testing.T) { + ctx := context.TODO() + targetDelegate := &fakeEtcd3RetryingProberMonitor{ + // we set it to -1 to indicate that the first + // execution is not a retry + actualRetries: -1, + probeFn: scenario.retryFnError, + } + + target := &etcd3RetryingProberMonitor{delegate: targetDelegate} + err := target.Probe(ctx) + + if targetDelegate.actualRetries != scenario.expectedRetries { + t.Errorf("Unexpected number of retries %v, expected %v", targetDelegate.actualRetries, scenario.expectedRetries) + } + if (err == nil && scenario.expectedFinalError != nil) || (err != nil && scenario.expectedFinalError == nil) { + t.Errorf("Expected error %v, got %v", scenario.expectedFinalError, err) + } + if err != nil && scenario.expectedFinalError != nil && err.Error() != scenario.expectedFinalError.Error() { + t.Errorf("Expected error %v, got %v", scenario.expectedFinalError, err) + } + }) + } +} + +func TestEtcd3RetryingMonitor(t *testing.T) { + for _, scenario := range getRetryScenarios() { + t.Run(scenario.name, func(t *testing.T) { + ctx := context.TODO() + expectedRetValue := int64(scenario.expectedRetries) + targetDelegate := &fakeEtcd3RetryingProberMonitor{ + // we set it to -1 to indicate that the first + // execution is not a retry + actualRetries: -1, + monitorFn: func() func() (metrics.StorageMetrics, error) { + retryCounter := -1 + return func() (metrics.StorageMetrics, error) { + retryCounter++ + err := scenario.retryFnError() + ret := metrics.StorageMetrics{int64(retryCounter)} + return ret, err + } + }(), + } + + target := &etcd3RetryingProberMonitor{delegate: targetDelegate} + actualRetValue, err := target.Monitor(ctx) + + if targetDelegate.actualRetries != scenario.expectedRetries { + t.Errorf("Unexpected number of retries %v, expected %v", targetDelegate.actualRetries, scenario.expectedRetries) + } + if (err == nil && scenario.expectedFinalError != nil) || (err != nil && 
scenario.expectedFinalError == nil) { + t.Errorf("Expected error %v, got %v", scenario.expectedFinalError, err) + } + if err != nil && scenario.expectedFinalError != nil && err.Error() != scenario.expectedFinalError.Error() { + t.Errorf("Expected error %v, got %v", scenario.expectedFinalError, err) + } + if actualRetValue.Size != expectedRetValue { + t.Errorf("Unexpected value returned actual %v, expected %v", actualRetValue.Size, expectedRetValue) + } + }) + } +} + +type fakeEtcd3RetryingProberMonitor struct { + actualRetries int + probeFn func() error + monitorFn func() (metrics.StorageMetrics, error) +} + +func (f *fakeEtcd3RetryingProberMonitor) Probe(_ context.Context) error { + f.actualRetries++ + return f.probeFn() +} + +func (f *fakeEtcd3RetryingProberMonitor) Monitor(_ context.Context) (metrics.StorageMetrics, error) { + f.actualRetries++ + return f.monitorFn() +} + +func (f *fakeEtcd3RetryingProberMonitor) Close() error { + panic("not implemented") +} diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes_test.go b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes_test.go index 65c10bdd0ded4..a2e5a9278926e 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes_test.go @@ -730,10 +730,12 @@ func TestRoundTrip(t *testing.T) { if err != nil { t.Fatal(err) } - aes24block, err := aes.NewCipher(bytes.Repeat([]byte("b"), 24)) + /* FIPS disabled + aes24block, err := aes.NewCipher([]byte(bytes.Repeat([]byte("b"), 24))) if err != nil { t.Fatal(err) } + */ key32 := bytes.Repeat([]byte("c"), 32) aes32block, err := aes.NewCipher(key32) if err != nil { @@ -746,10 +748,10 @@ func TestRoundTrip(t *testing.T) { t value.Transformer }{ {name: "GCM 16 byte key", t: newGCMTransformer(t, aes16block, nil)}, - {name: "GCM 24 byte key", t: newGCMTransformer(t, aes24block, nil)}, + // FIPS disabled {name: "GCM 24 byte key", t: newGCMTransformer(t, aes24block, nil)}, {name: "GCM 32 byte key", t: newGCMTransformer(t, aes32block, nil)}, {name: "GCM 16 byte unsafe key", t: newGCMTransformerWithUniqueKeyUnsafeTest(t, aes16block, nil)}, - {name: "GCM 24 byte unsafe key", t: newGCMTransformerWithUniqueKeyUnsafeTest(t, aes24block, nil)}, + // FIPS disabled {name: "GCM 24 byte unsafe key", t: newGCMTransformerWithUniqueKeyUnsafeTest(t, aes24block, nil)}, {name: "GCM 32 byte unsafe key", t: newGCMTransformerWithUniqueKeyUnsafeTest(t, aes32block, nil)}, {name: "GCM 32 byte seed", t: newHKDFExtendedNonceGCMTransformerTest(t, nil, key32)}, {name: "CBC 32 byte key", t: NewCBCTransformer(aes32block)}, diff --git a/staging/src/k8s.io/cli-runtime/go.mod b/staging/src/k8s.io/cli-runtime/go.mod index ae376cadc89ee..f67524b062abd 100644 --- a/staging/src/k8s.io/cli-runtime/go.mod +++ b/staging/src/k8s.io/cli-runtime/go.mod @@ -74,6 +74,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/cli-runtime/go.sum b/staging/src/k8s.io/cli-runtime/go.sum index fdd3895bceb06..bb7f2aa389f5a 100644 --- a/staging/src/k8s.io/cli-runtime/go.sum +++ b/staging/src/k8s.io/cli-runtime/go.sum @@ -87,10 +87,10 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git a/staging/src/k8s.io/client-go/go.mod b/staging/src/k8s.io/client-go/go.mod index 50f736561a611..dfedc730887d1 100644 --- a/staging/src/k8s.io/client-go/go.mod +++ b/staging/src/k8s.io/client-go/go.mod @@ -56,7 +56,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/onsi/ginkgo/v2 v2.21.0 // indirect + github.com/onsi/ginkgo/v2 v2.20.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/x448/float16 v0.8.4 // indirect @@ -68,6 +68,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery ) diff --git a/staging/src/k8s.io/client-go/go.sum b/staging/src/k8s.io/client-go/go.sum index 378e80b311a10..c27200bfb870a 100644 --- a/staging/src/k8s.io/client-go/go.sum +++ b/staging/src/k8s.io/client-go/go.sum @@ -74,10 +74,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git 
a/staging/src/k8s.io/cloud-provider/go.mod b/staging/src/k8s.io/cloud-provider/go.mod index a67e8c684893d..09114bdce9843 100644 --- a/staging/src/k8s.io/cloud-provider/go.mod +++ b/staging/src/k8s.io/cloud-provider/go.mod @@ -13,11 +13,11 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/component-base v0.32.0 k8s.io/component-helpers v0.0.0 k8s.io/controller-manager v0.0.0 k8s.io/klog/v2 v2.130.1 @@ -64,6 +64,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.1 // indirect @@ -84,6 +85,7 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect @@ -95,15 +97,15 @@ require ( golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect + google.golang.org/grpc v1.67.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kms v0.0.0 // indirect + k8s.io/kms v0.32.0 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect @@ -112,7 +114,12 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api + k8s.io/apiextensions-apiserver => ../apiextensions-apiserver k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver k8s.io/client-go => ../client-go @@ -120,4 +127,5 @@ replace ( k8s.io/component-helpers => ../component-helpers k8s.io/controller-manager => ../controller-manager k8s.io/kms => ../kms + k8s.io/kube-aggregator => ../kube-aggregator ) diff --git a/staging/src/k8s.io/cloud-provider/go.sum b/staging/src/k8s.io/cloud-provider/go.sum index 
018b07378daeb..75f9c194a43b7 100644 --- a/staging/src/k8s.io/cloud-provider/go.sum +++ b/staging/src/k8s.io/cloud-provider/go.sum @@ -1,10 +1,12 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -22,7 +24,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -36,19 +38,31 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod 
h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -72,9 +86,15 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod 
h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= @@ -91,6 +111,7 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -102,6 +123,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -136,17 +159,25 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod 
h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -155,6 +186,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -222,6 +254,7 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -279,12 +312,12 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -312,6 +345,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/staging/src/k8s.io/cluster-bootstrap/go.mod b/staging/src/k8s.io/cluster-bootstrap/go.mod index 0bd2020cacbe3..f43aa6a89ce4a 100644 --- a/staging/src/k8s.io/cluster-bootstrap/go.mod +++ b/staging/src/k8s.io/cluster-bootstrap/go.mod @@ -40,6 +40,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery ) diff --git a/staging/src/k8s.io/cluster-bootstrap/go.sum b/staging/src/k8s.io/cluster-bootstrap/go.sum index 3cb8bab46e34a..e97475107f050 100644 --- a/staging/src/k8s.io/cluster-bootstrap/go.sum +++ b/staging/src/k8s.io/cluster-bootstrap/go.sum @@ -41,8 +41,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= diff --git a/staging/src/k8s.io/code-generator/examples/go.mod b/staging/src/k8s.io/code-generator/examples/go.mod index bd5004269a8dd..519322ec3a274 100644 --- a/staging/src/k8s.io/code-generator/examples/go.mod +++ b/staging/src/k8s.io/code-generator/examples/go.mod @@ -7,8 +7,8 @@ go 1.23.0 godebug default=go1.23 require ( - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 k8s.io/client-go v0.0.0 k8s.io/klog/v2 v2.130.1 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f @@ -57,3 +57,13 @@ replace ( k8s.io/apimachinery => ../../apimachinery k8s.io/client-go => ../../client-go ) + +replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + +replace github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + +replace github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + +replace github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 + +replace github.com/openshift/apiserver-library-go => github.com/dusk125/apiserver-library-go v0.0.0-20241212055705-41777f979e50 diff --git a/staging/src/k8s.io/code-generator/examples/go.sum b/staging/src/k8s.io/code-generator/examples/go.sum index 1b79bdfce540b..1cd0aeb01f50e 100644 --- a/staging/src/k8s.io/code-generator/examples/go.sum +++ b/staging/src/k8s.io/code-generator/examples/go.sum @@ -57,10 +57,10 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/staging/src/k8s.io/code-generator/examples/hack/verify-codegen.sh b/staging/src/k8s.io/code-generator/examples/hack/verify-codegen.sh index 7dd5e657671fb..72ead8ed65062 100755 --- a/staging/src/k8s.io/code-generator/examples/hack/verify-codegen.sh +++ b/staging/src/k8s.io/code-generator/examples/hack/verify-codegen.sh @@ -43,6 +43,8 @@ else exit 1 fi +GOFLAGS=-mod=readonly + # smoke test echo "Smoke testing examples by compiling..." 
pushd "${SCRIPT_ROOT}" diff --git a/staging/src/k8s.io/code-generator/go.mod b/staging/src/k8s.io/code-generator/go.mod index 1e23a31ca4320..2f5399eeb78a6 100644 --- a/staging/src/k8s.io/code-generator/go.mod +++ b/staging/src/k8s.io/code-generator/go.mod @@ -46,4 +46,7 @@ require ( sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect ) -replace k8s.io/apimachinery => ../apimachinery +replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + k8s.io/apimachinery => ../apimachinery +) diff --git a/staging/src/k8s.io/code-generator/go.sum b/staging/src/k8s.io/code-generator/go.sum index 268ddf453291f..9d4d545999d31 100644 --- a/staging/src/k8s.io/code-generator/go.sum +++ b/staging/src/k8s.io/code-generator/go.sum @@ -60,10 +60,10 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= diff --git a/staging/src/k8s.io/code-generator/kube_codegen.sh b/staging/src/k8s.io/code-generator/kube_codegen.sh index 8207da5eddd14..1ae0294783028 100755 --- a/staging/src/k8s.io/code-generator/kube_codegen.sh +++ b/staging/src/k8s.io/code-generator/kube_codegen.sh @@ -27,6 +27,8 @@ set -o pipefail KUBE_CODEGEN_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)" +GOFLAGS=-mod=readonly + function kube::codegen::internal::findz() { # We use `find` rather than `git ls-files` because sometimes external # projects use this across repos. This is an imperfect wrapper of find, diff --git a/staging/src/k8s.io/component-base/featuregate/feature_gate.go b/staging/src/k8s.io/component-base/featuregate/feature_gate.go index b6f08a6cd6a3f..416aafc71f975 100644 --- a/staging/src/k8s.io/component-base/featuregate/feature_gate.go +++ b/staging/src/k8s.io/component-base/featuregate/feature_gate.go @@ -327,9 +327,8 @@ func (f *featureGate) unsafeSetFromMap(enabled map[Feature]bool, m map[string]bo key := Feature(k) versionedSpecs, ok := known[key] if !ok { - // early return if encounters an unknown feature. 
- errs = append(errs, fmt.Errorf("unrecognized feature gate: %s", k)) - return errs + klog.Warningf("unrecognized feature gate: %s", k) + continue } featureSpec := featureSpecAtEmulationVersion(versionedSpecs, emulationVersion) if featureSpec.LockToDefault && featureSpec.Default != v { diff --git a/staging/src/k8s.io/component-base/featuregate/feature_gate_test.go b/staging/src/k8s.io/component-base/featuregate/feature_gate_test.go index b0e0413dcd4bc..3d4b0ddbce863 100644 --- a/staging/src/k8s.io/component-base/featuregate/feature_gate_test.go +++ b/staging/src/k8s.io/component-base/featuregate/feature_gate_test.go @@ -88,7 +88,7 @@ func TestFeatureGateFlag(t *testing.T) { testBetaGate: false, testLockedFalseGate: false, }, - parseError: "unrecognized feature gate: fooBarBaz", + //parseError: "unrecognized feature gate: fooBarBaz", }, { arg: "AllAlpha=false", @@ -417,7 +417,7 @@ func TestFeatureGateSetFromMap(t *testing.T) { testAlphaGate: false, testBetaGate: false, }, - setmapError: "unrecognized feature gate:", + //setmapError: "unrecognized feature gate:", }, { name: "set locked gates", @@ -764,7 +764,7 @@ func TestVersionedFeatureGateFlag(t *testing.T) { testAlphaGateNoVersion: false, testBetaGateNoVersion: false, }, - parseError: "unrecognized feature gate: fooBarBaz", + // parseError: "unrecognized feature gate: fooBarBaz", }, { arg: "AllAlpha=false", @@ -1047,8 +1047,12 @@ func TestVersionedFeatureGateFlag(t *testing.T) { errs = append(errs, err) } err = utilerrors.NewAggregate(errs) + strErr := "" + if err != nil { + strErr = err.Error() + } if test.parseError != "" { - if !strings.Contains(err.Error(), test.parseError) { + if !strings.Contains(strErr, test.parseError) { t.Errorf("%d: Parse() Expected %v, Got %v", i, test.parseError, err) } return @@ -1590,9 +1594,9 @@ func TestCopyKnownFeatures(t *testing.T) { require.NoError(t, fcopy.Set("FeatureB=false")) assert.True(t, f.Enabled("FeatureB")) assert.False(t, fcopy.Enabled("FeatureB")) - if err := fcopy.Set("FeatureC=true"); err == nil { - t.Error("expected FeatureC not registered in the copied feature gate") - } + // if err := fcopy.Set("FeatureC=true"); err == nil { + // t.Error("expected FeatureC not registered in the copied feature gate") + // } } func TestExplicitlySet(t *testing.T) { diff --git a/staging/src/k8s.io/component-base/featuregate/registry_test.go b/staging/src/k8s.io/component-base/featuregate/registry_test.go index 0c362d0d81f80..1df983ecb2f1a 100644 --- a/staging/src/k8s.io/component-base/featuregate/registry_test.go +++ b/staging/src/k8s.io/component-base/featuregate/registry_test.go @@ -261,7 +261,7 @@ func TestFlags(t *testing.T) { "--emulated-version=test=2.7", "--feature-gates=test:testD=true", }, - parseError: "unrecognized feature gate: testD", + // parseError: "unrecognized feature gate: testD", }, { name: "setting unknown component feature flag", diff --git a/staging/src/k8s.io/component-base/go.mod b/staging/src/k8s.io/component-base/go.mod index 314a8e221796a..4f8844b35f116 100644 --- a/staging/src/k8s.io/component-base/go.mod +++ b/staging/src/k8s.io/component-base/go.mod @@ -74,9 +74,9 @@ require ( golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/api 
v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect + google.golang.org/grpc v1.67.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -87,6 +87,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/component-base/go.sum b/staging/src/k8s.io/component-base/go.sum index da154e5cc24e2..45abf210a1fee 100644 --- a/staging/src/k8s.io/component-base/go.sum +++ b/staging/src/k8s.io/component-base/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= @@ -17,7 +17,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= @@ -28,8 +28,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -55,7 +55,7 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v 
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= @@ -106,13 +106,14 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -213,12 +214,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod 
h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/component-helpers/go.mod b/staging/src/k8s.io/component-helpers/go.mod index 45639a04e4c4d..2820a068278b9 100644 --- a/staging/src/k8s.io/component-helpers/go.mod +++ b/staging/src/k8s.io/component-helpers/go.mod @@ -57,6 +57,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/component-helpers/go.sum b/staging/src/k8s.io/component-helpers/go.sum index eb695ac7be4dc..d38cca163f245 100644 --- a/staging/src/k8s.io/component-helpers/go.sum +++ b/staging/src/k8s.io/component-helpers/go.sum @@ -66,10 +66,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/staging/src/k8s.io/controller-manager/app/serve.go b/staging/src/k8s.io/controller-manager/app/serve.go index 0f1e1fec60ea1..2a99efbddabeb 100644 --- a/staging/src/k8s.io/controller-manager/app/serve.go +++ b/staging/src/k8s.io/controller-manager/app/serve.go @@ -48,7 +48,7 @@ func BuildHandlerChain(apiHandler http.Handler, authorizationInfo *apiserver.Aut } handler = genericapifilters.WithRequestInfo(handler, requestInfoResolver) handler = genericapifilters.WithCacheControl(handler) - 
handler = genericfilters.WithHTTPLogging(handler) + handler = genericfilters.WithHTTPLogging(handler, nil) handler = genericfilters.WithPanicRecovery(handler, requestInfoResolver) return handler diff --git a/staging/src/k8s.io/controller-manager/go.mod b/staging/src/k8s.io/controller-manager/go.mod index 8af2b7eb8df1e..b8826b30abb24 100644 --- a/staging/src/k8s.io/controller-manager/go.mod +++ b/staging/src/k8s.io/controller-manager/go.mod @@ -12,11 +12,11 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 golang.org/x/oauth2 v0.23.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/client-go v0.32.0 + k8s.io/component-base v0.32.0 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 ) @@ -79,6 +79,7 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect @@ -89,9 +90,9 @@ require ( golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect + google.golang.org/grpc v1.67.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -104,6 +105,8 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver diff --git a/staging/src/k8s.io/controller-manager/go.sum b/staging/src/k8s.io/controller-manager/go.sum index 723f969914195..6cbce8ff59770 100644 --- a/staging/src/k8s.io/controller-manager/go.sum +++ b/staging/src/k8s.io/controller-manager/go.sum @@ -1,6 +1,6 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= @@ -21,7 +21,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -33,12 +33,13 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -69,7 +70,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= @@ -132,13 +133,14 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= 
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -218,6 +220,7 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -274,12 +277,12 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc 
v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/cri-api/go.mod b/staging/src/k8s.io/cri-api/go.mod index c1627a5d10b8a..9415c68208781 100644 --- a/staging/src/k8s.io/cri-api/go.mod +++ b/staging/src/k8s.io/cri-api/go.mod @@ -11,7 +11,7 @@ godebug winsymlink=0 require ( github.com/gogo/protobuf v1.3.2 github.com/stretchr/testify v1.9.0 - google.golang.org/grpc v1.65.0 + google.golang.org/grpc v1.67.0 ) require ( @@ -22,7 +22,7 @@ require ( golang.org/x/net v0.30.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.19.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/staging/src/k8s.io/cri-api/go.sum b/staging/src/k8s.io/cri-api/go.sum index 8157de0f4321e..0a0877c29aa05 100644 --- a/staging/src/k8s.io/cri-api/go.sum +++ b/staging/src/k8s.io/cri-api/go.sum @@ -1,17 +1,17 @@ -cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -25,6 +25,7 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= @@ -48,7 +49,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -72,11 +73,11 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod 
h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/cri-client/go.mod b/staging/src/k8s.io/cri-client/go.mod index 3619c649a9a57..1fa04bf735b54 100644 --- a/staging/src/k8s.io/cri-client/go.mod +++ b/staging/src/k8s.io/cri-client/go.mod @@ -16,7 +16,7 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 go.opentelemetry.io/otel/trace v1.28.0 golang.org/x/sys v0.26.0 - google.golang.org/grpc v1.65.0 + google.golang.org/grpc v1.67.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 @@ -71,8 +71,8 @@ require ( golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect @@ -83,6 +83,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/cri-client/go.sum b/staging/src/k8s.io/cri-client/go.sum index 0f949091505ba..94b468c346011 100644 --- a/staging/src/k8s.io/cri-client/go.sum +++ b/staging/src/k8s.io/cri-client/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= @@ -18,7 +18,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 
h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -54,7 +54,7 @@ github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= @@ -103,10 +103,11 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -205,12 +206,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 
h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/csi-translation-lib/go.mod b/staging/src/k8s.io/csi-translation-lib/go.mod index b35dfee98cf3c..a1ad44620c570 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.mod +++ b/staging/src/k8s.io/csi-translation-lib/go.mod @@ -39,6 +39,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery ) diff --git a/staging/src/k8s.io/csi-translation-lib/go.sum b/staging/src/k8s.io/csi-translation-lib/go.sum index ce95781b32c4c..04d191bf36c48 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.sum +++ b/staging/src/k8s.io/csi-translation-lib/go.sum @@ -42,8 +42,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.mod b/staging/src/k8s.io/dynamic-resource-allocation/go.mod index 6c6b54ba6972f..4a6ae2cdaeb98 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/go.mod +++ 
b/staging/src/k8s.io/dynamic-resource-allocation/go.mod @@ -14,11 +14,11 @@ require ( github.com/google/go-cmp v0.6.0 github.com/onsi/gomega v1.35.1 github.com/stretchr/testify v1.9.0 - google.golang.org/grpc v1.65.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 + google.golang.org/grpc v1.67.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 + k8s.io/client-go v0.32.0 k8s.io/component-helpers v0.0.0 k8s.io/klog/v2 v2.130.1 k8s.io/kubelet v0.0.0 @@ -70,13 +70,13 @@ require ( golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.0.0 // indirect + k8s.io/component-base v0.32.0 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect @@ -84,6 +84,8 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.sum b/staging/src/k8s.io/dynamic-resource-allocation/go.sum index 058dee2c6239c..9518004a184aa 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/go.sum +++ b/staging/src/k8s.io/dynamic-resource-allocation/go.sum @@ -1,6 +1,6 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= @@ -18,7 +18,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= 
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -28,11 +28,12 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -56,7 +57,7 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= @@ -111,13 +112,14 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= 
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -177,6 +179,7 @@ go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8 go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= @@ -228,12 +231,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/endpointslice/go.mod b/staging/src/k8s.io/endpointslice/go.mod index 186bf8ab65c27..5e78bbc10e59a 100644 --- 
a/staging/src/k8s.io/endpointslice/go.mod +++ b/staging/src/k8s.io/endpointslice/go.mod @@ -68,6 +68,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/endpointslice/go.sum b/staging/src/k8s.io/endpointslice/go.sum index 886f63594bf7f..4fa08b88e475f 100644 --- a/staging/src/k8s.io/endpointslice/go.sum +++ b/staging/src/k8s.io/endpointslice/go.sum @@ -87,10 +87,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -182,9 +182,9 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/externaljwt/go.mod b/staging/src/k8s.io/externaljwt/go.mod index eb15fe62476e4..952c28adc8742 100644 --- a/staging/src/k8s.io/externaljwt/go.mod +++ b/staging/src/k8s.io/externaljwt/go.mod @@ -10,7 +10,7 @@ godebug winsymlink=0 require ( 
github.com/gogo/protobuf v1.3.2 - google.golang.org/grpc v1.65.0 + google.golang.org/grpc v1.67.0 google.golang.org/protobuf v1.35.1 ) @@ -18,5 +18,5 @@ require ( golang.org/x/net v0.30.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.19.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect ) diff --git a/staging/src/k8s.io/externaljwt/go.sum b/staging/src/k8s.io/externaljwt/go.sum index dfcea7ccbb065..390ca6e7345a7 100644 --- a/staging/src/k8s.io/externaljwt/go.sum +++ b/staging/src/k8s.io/externaljwt/go.sum @@ -1,19 +1,20 @@ -cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -29,7 +30,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= 
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -53,10 +54,10 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= diff --git a/staging/src/k8s.io/kms/go.mod b/staging/src/k8s.io/kms/go.mod index 55b001abb135d..d291d45a3b1ad 100644 --- a/staging/src/k8s.io/kms/go.mod +++ b/staging/src/k8s.io/kms/go.mod @@ -10,13 +10,13 @@ godebug winsymlink=0 require ( github.com/gogo/protobuf v1.3.2 - google.golang.org/grpc v1.65.0 + google.golang.org/grpc v1.67.0 ) require ( golang.org/x/net v0.30.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.19.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect google.golang.org/protobuf v1.35.1 // indirect ) diff --git a/staging/src/k8s.io/kms/go.sum b/staging/src/k8s.io/kms/go.sum index dfcea7ccbb065..390ca6e7345a7 100644 --- a/staging/src/k8s.io/kms/go.sum +++ b/staging/src/k8s.io/kms/go.sum @@ -1,19 +1,20 @@ -cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= 
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -29,7 +30,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -53,10 +54,10 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api 
v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= diff --git a/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod b/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod index 24aa2bdf7013e..55afc05901fda 100644 --- a/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod +++ b/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod @@ -17,9 +17,19 @@ require ( golang.org/x/net v0.30.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.19.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect + google.golang.org/grpc v1.67.0 // indirect google.golang.org/protobuf v1.35.1 // indirect ) replace k8s.io/kms => ../../../../kms + +replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + +replace github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + +replace github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + +replace github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 + +replace github.com/openshift/apiserver-library-go => github.com/dusk125/apiserver-library-go v0.0.0-20241212055705-41777f979e50 diff --git a/staging/src/k8s.io/kms/internal/plugins/_mock/go.sum b/staging/src/k8s.io/kms/internal/plugins/_mock/go.sum index 3478c7213acec..12685e389c7e2 100644 --- a/staging/src/k8s.io/kms/internal/plugins/_mock/go.sum +++ b/staging/src/k8s.io/kms/internal/plugins/_mock/go.sum @@ -53,9 +53,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod 
h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= diff --git a/staging/src/k8s.io/kube-aggregator/go.mod b/staging/src/k8s.io/kube-aggregator/go.mod index f40b6ef51a4f2..c4113a74aa4d6 100644 --- a/staging/src/k8s.io/kube-aggregator/go.mod +++ b/staging/src/k8s.io/kube-aggregator/go.mod @@ -13,6 +13,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/google/go-cmp v0.6.0 github.com/google/gofuzz v1.2.0 + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 @@ -20,12 +21,12 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 go.opentelemetry.io/otel/trace v1.28.0 golang.org/x/net v0.30.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 + k8s.io/client-go v0.32.0 k8s.io/code-generator v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/component-base v0.32.0 k8s.io/klog/v2 v2.130.1 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 @@ -67,8 +68,8 @@ require ( github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.1 // indirect @@ -86,6 +87,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect @@ -98,23 +100,28 @@ require ( golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect + google.golang.org/grpc v1.67.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 
v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect - k8s.io/kms v0.0.0 // indirect + k8s.io/kms v0.32.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api + k8s.io/apiextensions-apiserver => ../apiextensions-apiserver k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum index 77d538064ab84..2d0dfe68ebc58 100644 --- a/staging/src/k8s.io/kube-aggregator/go.sum +++ b/staging/src/k8s.io/kube-aggregator/go.sum @@ -1,9 +1,11 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -22,7 +24,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -34,19 +36,31 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -70,9 +84,15 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod 
h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= @@ -89,6 +109,7 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -100,6 +121,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -135,17 +158,25 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod 
h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -154,6 +185,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -222,6 +254,7 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -280,12 +313,12 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -314,6 +347,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go index d425ae7add051..e8e7e8b70f493 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go @@ -20,12 +20,15 @@ import ( "context" "fmt" "net/http" + "strings" "sync" "time" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/endpoints/discovery/aggregated" @@ -40,6 +43,7 @@ import ( "k8s.io/client-go/transport" "k8s.io/component-base/metrics/legacyregistry" "k8s.io/component-base/tracing" + "k8s.io/klog/v2" v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" v1helper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper" "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" @@ -156,6 +160,9 @@ type APIAggregator struct { // is the versions for the group. handledGroupVersions map[string]sets.Set[string] + // handledAlwaysLocalDelegatePaths are the URL paths that already have routes registered + handledAlwaysLocalDelegatePaths sets.String + // lister is used to add group handling for /apis/ aggregator lookups based on // controller state lister listers.APIServiceLister @@ -241,19 +248,20 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg } s := &APIAggregator{ - GenericAPIServer: genericServer, - delegateHandler: delegationTarget.UnprotectedHandler(), - proxyTransportDial: proxyTransportDial, - proxyHandlers: map[string]*proxyHandler{}, - handledGroupVersions: map[string]sets.Set[string]{}, - lister: informerFactory.Apiregistration().V1().APIServices().Lister(), - APIRegistrationInformers: informerFactory, - serviceResolver: c.ExtraConfig.ServiceResolver, - openAPIConfig: c.GenericConfig.OpenAPIConfig, - openAPIV3Config: c.GenericConfig.OpenAPIV3Config, - proxyCurrentCertKeyContent: func() (bytes []byte, bytes2 []byte) { return nil, nil }, - rejectForwardingRedirects: c.ExtraConfig.RejectForwardingRedirects, - tracerProvider: c.GenericConfig.TracerProvider, + GenericAPIServer: genericServer, + delegateHandler: delegationTarget.UnprotectedHandler(), + proxyTransportDial: proxyTransportDial, + proxyHandlers: map[string]*proxyHandler{}, + handledGroupVersions: map[string]sets.Set[string]{}, + handledAlwaysLocalDelegatePaths: sets.String{}, + lister: informerFactory.Apiregistration().V1().APIServices().Lister(), + APIRegistrationInformers: informerFactory, + serviceResolver: c.ExtraConfig.ServiceResolver, + openAPIConfig: c.GenericConfig.OpenAPIConfig, + openAPIV3Config: c.GenericConfig.OpenAPIV3Config, + proxyCurrentCertKeyContent: func() (bytes []byte, bytes2 []byte) { return nil, nil }, + rejectForwardingRedirects: c.ExtraConfig.RejectForwardingRedirects, + tracerProvider: c.GenericConfig.TracerProvider, } // used later to filter the served resource by those that have expired. @@ -350,6 +358,7 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg (func() ([]byte, []byte))(s.proxyCurrentCertKeyContent), s.serviceResolver, metrics, + c.GenericConfig.HasBeenReadySignal(), ) if err != nil { return nil, err @@ -370,6 +379,33 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg return nil }) + s.GenericAPIServer.AddPostStartHook("apiservice-wait-for-first-sync", func(context genericapiserver.PostStartHookContext) error { + // when the aggregator first starts, it should make sure that it has proxy handlers for all the known good API services at this time + // we only need to do this once. 
+	err := wait.PollImmediateUntil(100*time.Millisecond, func() (bool, error) {
+		// snapshot the proxy handlers registered so far; this set only grows, so reading it before listing keeps the check conservative
+		handledAPIServices := sets.StringKeySet(s.proxyHandlers)
+		apiservices, err := s.lister.List(labels.Everything())
+		if err != nil {
+			return false, err
+		}
+		expectedAPIServices := sets.NewString()
+		for _, apiservice := range apiservices {
+			if v1helper.IsAPIServiceConditionTrue(apiservice, v1.Available) {
+				expectedAPIServices.Insert(apiservice.Name)
+			}
+		}
+
+		notYetHandledAPIServices := expectedAPIServices.Difference(handledAPIServices)
+		if len(notYetHandledAPIServices) == 0 {
+			return true, nil
+		}
+		klog.Infof("still waiting on handling APIServices: %v", strings.Join(notYetHandledAPIServices.List(), ","))
+
+		return false, nil
+	}, context.Done())
+	return err
+})
 	if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) {
 		s.discoveryAggregationController = NewDiscoveryManager(
@@ -555,7 +591,11 @@ func (s *APIAggregator) AddAPIService(apiService *v1.APIService) error {
 	}
 	proxyHandler.updateAPIService(apiService)
 	if s.openAPIAggregationController != nil {
-		s.openAPIAggregationController.AddAPIService(proxyHandler, apiService)
+		// this calls into a controller; it should already handle being invoked asynchronously.
+		go func() {
+			defer utilruntime.HandleCrash()
+			s.openAPIAggregationController.AddAPIService(proxyHandler, apiService)
+		}()
 	}
 	if s.openAPIV3AggregationController != nil {
 		s.openAPIV3AggregationController.AddAPIService(proxyHandler, apiService)
@@ -564,7 +604,10 @@ func (s *APIAggregator) AddAPIService(apiService *v1.APIService) error {
 		s.discoveryAggregationController.AddAPIService(apiService, proxyHandler)
 	}
-	s.proxyHandlers[apiService.Name] = proxyHandler
+	// we want to update the registration bit last, after all the pieces are wired together
+	defer func() {
+		s.proxyHandlers[apiService.Name] = proxyHandler
+	}()
 	s.GenericAPIServer.Handler.NonGoRestfulMux.Handle(proxyPath, proxyHandler)
 	s.GenericAPIServer.Handler.NonGoRestfulMux.UnlistedHandlePrefix(proxyPath+"/", proxyHandler)
@@ -580,6 +623,18 @@ func (s *APIAggregator) AddAPIService(apiService *v1.APIService) error {
 		return nil
 	}
+	// For some resources we always want to delegate to the local API server.
+	// These resources have to exist as CRDs to be served locally.
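+	// The prefixes registered via AddAlwaysLocalDelegateForPrefix are full resource paths such as
+	// "/apis/quota.openshift.io/v1/clusterresourcequotas" (a hypothetical value; the embedding server supplies the real ones).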
+	for _, alwaysLocalDelegatePath := range alwaysLocalDelegatePathPrefixes.List() {
+		if s.handledAlwaysLocalDelegatePaths.Has(alwaysLocalDelegatePath) {
+			continue
+		}
+		s.GenericAPIServer.Handler.NonGoRestfulMux.Handle(alwaysLocalDelegatePath, proxyHandler.localDelegate)
+		// Always use the local delegate for this prefix
+		s.GenericAPIServer.Handler.NonGoRestfulMux.UnlistedHandlePrefix(alwaysLocalDelegatePath+"/", proxyHandler.localDelegate)
+		s.handledAlwaysLocalDelegatePaths.Insert(alwaysLocalDelegatePath)
+	}
+
 	// it's time to register the group aggregation endpoint
 	groupPath := "/apis/" + apiService.Spec.Group
 	groupDiscoveryHandler := &apiGroupHandler{
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go
index d95a271af76ee..925ab117a4fc9 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go
@@ -22,6 +22,7 @@ import (
 	"sync/atomic"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/httpstream"
 	"k8s.io/apimachinery/pkg/util/proxy"
 	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
@@ -119,6 +120,14 @@ func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 		return
 	}
+	// some groupResources should always be delegated to the local API server
+	if requestInfo, ok := genericapirequest.RequestInfoFrom(req.Context()); ok {
+		if alwaysLocalDelegateGroupResource[schema.GroupResource{Group: requestInfo.APIGroup, Resource: requestInfo.Resource}] {
+			r.localDelegate.ServeHTTP(w, req)
+			return
+		}
+	}
+
 	if !handlingInfo.serviceAvailable {
 		proxyError(w, req, "service unavailable", http.StatusServiceUnavailable)
 		return
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/patch_always_local_delegate.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/patch_always_local_delegate.go
new file mode 100644
index 0000000000000..f7169c0d1abd6
--- /dev/null
+++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/patch_always_local_delegate.go
@@ -0,0 +1,49 @@
+package apiserver
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// alwaysLocalDelegatePathPrefixes specifies the API path prefixes that we want to delegate to the Kubernetes API server
+// instead of handling with the OpenShift API server.
+var alwaysLocalDelegatePathPrefixes = sets.NewString()
+
+// AddAlwaysLocalDelegateForPrefix will cause the given URL prefix to always be served by the local API server (kube-apiserver).
+// This allows some resources to be moved from an aggregated API server into a CRD.
+func AddAlwaysLocalDelegateForPrefix(prefix string) {
+	if alwaysLocalDelegatePathPrefixes.Has(prefix) {
+		return
+	}
+	alwaysLocalDelegatePathPrefixes.Insert(prefix)
+}
+
+var overlappingGroupVersion = map[schema.GroupVersion]bool{}
+
+// AddOverlappingGroupVersion will stop the CRD registration controller from trying to manage an APIService.
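+// For example, a caller that has moved a group/version to a CRD could register it as (hypothetical wiring,
+// not part of this patch):
+//
+//	AddOverlappingGroupVersion(schema.GroupVersion{Group: "quota.openshift.io", Version: "v1"})
+//
+// so the existing APIService for that group/version is left alone.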
+func AddOverlappingGroupVersion(groupVersion schema.GroupVersion) {
+	overlappingGroupVersion[groupVersion] = true
+}
+
+var alwaysLocalDelegateGroupResource = map[schema.GroupResource]bool{}
+
+func AddAlwaysLocalDelegateGroupResource(groupResource schema.GroupResource) {
+	alwaysLocalDelegateGroupResource[groupResource] = true
+}
+
+func APIServiceAlreadyExists(groupVersion schema.GroupVersion) bool {
+	if overlappingGroupVersion[groupVersion] {
+		return true
+	}
+
+	testPrefix := fmt.Sprintf("/apis/%s/%s/", groupVersion.Group, groupVersion.Version)
+	for _, prefix := range alwaysLocalDelegatePathPrefixes.List() {
+		if strings.HasPrefix(prefix, testPrefix) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go
index 331ae8144e81a..51b94604dc7bf 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go
@@ -238,9 +238,7 @@ func (s *specProxier) getOpenAPIV3Root() handler3.OpenAPIV3Discovery {
 	s.rwMutex.RLock()
 	defer s.rwMutex.RUnlock()
-	merged := handler3.OpenAPIV3Discovery{
-		Paths: make(map[string]handler3.OpenAPIV3DiscoveryGroupVersion),
-	}
+	paths := make(map[string][]handler3.OpenAPIV3DiscoveryGroupVersion)
 	for _, apiServiceInfo := range s.apiServiceInfo {
 		if apiServiceInfo.discovery == nil {
@@ -248,10 +246,10 @@ func (s *specProxier) getOpenAPIV3Root() handler3.OpenAPIV3Discovery {
 		for key, item := range apiServiceInfo.discovery.Paths {
-			merged.Paths[key] = item
+			paths[key] = append(paths[key], item)
 		}
 	}
-	return merged
+	return mergeOpenAPIV3RootPaths(paths)
 }
 // handleDiscovery is the handler for OpenAPI V3 Discovery
@@ -278,18 +276,33 @@ func (s *specProxier) handleGroupVersion(w http.ResponseWriter, r *http.Request)
 	url := strings.SplitAfterN(r.URL.Path, "/", 4)
 	targetGV := url[3]
+	var eligibleURLs []string
+	eligibleURLsToAPIServiceInfos := make(map[string]*openAPIV3APIServiceInfo)
+
 	for _, apiServiceInfo := range s.apiServiceInfo {
 		if apiServiceInfo.discovery == nil {
 			continue
 		}
-		for key := range apiServiceInfo.discovery.Paths {
-			if targetGV == key {
-				apiServiceInfo.handler.ServeHTTP(w, r)
-				return
+		for key, value := range apiServiceInfo.discovery.Paths {
+			if targetGV == key && eligibleURLsToAPIServiceInfos[value.ServerRelativeURL] == nil {
+				// add only apiServices that do not duplicate ServerRelativeURL (path + hash)
+				eligibleURLsToAPIServiceInfos[value.ServerRelativeURL] = apiServiceInfo
+				eligibleURLs = append(eligibleURLs, value.ServerRelativeURL)
+				break
 			}
 		}
+		if len(eligibleURLsToAPIServiceInfos) > 0 && !strings.HasPrefix(targetGV, "apis/") {
+			// do not search for duplicates on paths outside the apis/ prefix (e.g.
/version)
+			break
+		}
+	}
+
+	if len(eligibleURLs) > 0 {
+		delegateAndMergeHandleGroupVersion(w, r, eligibleURLs, eligibleURLsToAPIServiceInfos)
+		return
+	}
+
 	// No group-versions match the desired request
 	w.WriteHeader(404)
 }
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator_test.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator_test.go
index fdd250dd6a750..6b9ee6939a022 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator_test.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator_test.go
@@ -278,7 +278,7 @@ func TestOpenAPIRequestMetrics(t *testing.T) {
 	if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(`
# HELP apiserver_request_total [STABLE] Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, and HTTP response code.
# TYPE apiserver_request_total counter
-apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="openapi/v3",verb="GET",version=""} 1
+apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="openapi/v3",system_client="",verb="GET",version=""} 1
`), "apiserver_request_total"); err != nil {
 		t.Fatal(err)
 	}
@@ -289,8 +289,8 @@ apiserver_request_total{code="200",component="",dry_run="",group="",resource="",
 	if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(`
# HELP apiserver_request_total [STABLE] Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, and HTTP response code.
# TYPE apiserver_request_total counter
-apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="openapi/v3",verb="GET",version=""} 1
-apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="openapi/v3/",verb="GET",version=""} 1
+apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="openapi/v3",system_client="",verb="GET",version=""} 1
+apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="openapi/v3/",system_client="",verb="GET",version=""} 1
`), "apiserver_request_total"); err != nil {
 		t.Fatal(err)
 	}
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/patch_aggregator.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/patch_aggregator.go
new file mode 100644
index 0000000000000..8b2573f5a056e
--- /dev/null
+++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/patch_aggregator.go
@@ -0,0 +1,285 @@
+package aggregator
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha512"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	neturl "net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/munnerz/goautoneg"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apiserver/pkg/authentication/user"
+	"k8s.io/apiserver/pkg/endpoints/request"
+	"k8s.io/apiserver/pkg/util/responsewriter"
+	"k8s.io/klog/v2"
+	"k8s.io/kube-openapi/pkg/handler3"
+	"k8s.io/kube-openapi/pkg/spec3"
+)
+
+// mergeOpenAPIV3RootPaths expects a mapping from an OpenAPI v3 discovery path key to multiple ServerRelativeURLs;
+// these URLs can be backed by different APIServices or CRDs.
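+//
+// For illustration (hypothetical hashes), an input entry such as
+//
+//	"apis/quota.openshift.io/v1" -> [{ServerRelativeURL: ".../v1?hash=A"}, {ServerRelativeURL: ".../v1?hash=B"}]
+//
+// collapses into a single discovery path whose URL carries a hash computed over the sorted unique URLs.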
+//
+// We expect duplicates for the following groups:
+// authorization.openshift.io, security.openshift.io and quota.openshift.io
+// which are backed by both the CRD apiextensions apiserver and the openshift apiserver.
+func mergeOpenAPIV3RootPaths(paths map[string][]handler3.OpenAPIV3DiscoveryGroupVersion) handler3.OpenAPIV3Discovery {
+	merged := handler3.OpenAPIV3Discovery{
+		Paths: make(map[string]handler3.OpenAPIV3DiscoveryGroupVersion),
+	}
+
+	for key, delegationURLs := range paths {
+		// some apiservices can have duplicate paths in openapi v3 discovery (same path and hash) as they are backed by the same apiserver
+		delegationUniqueURLs := sets.List(toUniqueRelativeURLs(delegationURLs))
+		// we either have just one URL or a special URL like /version
+		if len(delegationUniqueURLs) == 1 || (len(delegationUniqueURLs) > 1 && !hasPrefix(delegationUniqueURLs, "/openapi/v3/apis/")) {
+			merged.Paths[key] = handler3.OpenAPIV3DiscoveryGroupVersion{
+				ServerRelativeURL: delegationURLs[0].ServerRelativeURL, // take first found apiServiceInfo
+			}
+		} else {
+			newMergedURL, err := mergeURLETags(delegationUniqueURLs)
+			if err != nil {
+				klog.Errorf("failed to create merged OpenAPI v3 URL for %s: %s", key, err.Error())
+				continue
+			}
+			merged.Paths[key] = handler3.OpenAPIV3DiscoveryGroupVersion{
+				ServerRelativeURL: newMergedURL.String(),
+			}
+
+		}
+	}
+	return merged
+}
+
+// delegateAndMergeHandleGroupVersion delegates requests to eligibleURLs and merges their output
+//
+// We expect to delegate and merge for the following groups:
+// authorization.openshift.io, security.openshift.io and quota.openshift.io
+// which are backed by both the CRD apiextensions apiserver and the openshift apiserver.
+//
+// The other requests will be passed to the original apiServiceInfo handler.
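+//
+// When several distinct URLs back the same group/version, the merged URL keeps the shared path and carries a
+// hash computed over the JSON encoding of the sorted unique URLs (see mergeURLETags); requests that arrive
+// with a stale hash are redirected to the merged URL, e.g. (hypothetical value)
+//
+//	/openapi/v3/apis/quota.openshift.io/v1?hash=139A97D1...
+//
+// which keeps the merged document cacheable as immutable content.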
+func delegateAndMergeHandleGroupVersion(w http.ResponseWriter, r *http.Request, eligibleURLs []string, eligibleURLsToAPIServiceInfos map[string]*openAPIV3APIServiceInfo) {
+	if len(eligibleURLs) == 1 {
+		// fully delegate to the handler
+		eligibleURLsToAPIServiceInfos[eligibleURLs[0]].handler.ServeHTTP(w, r)
+		return
+	} else if len(eligibleURLs) > 1 {
+		mergedURL, err := mergeURLETags(eligibleURLs)
+		if err != nil {
+			klog.Errorf("failed to get mergedURL: %s", err.Error())
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+
+		if !isHashCurrent(r.URL, mergedURL.Query().Get("hash")) {
+			http.Redirect(w, r, mergedURL.String(), 301)
+			return
+
+		}
+		var specs []*spec3.OpenAPI
+		var maxLastModified time.Time
+
+		for eligibleURL, apiServiceInfo := range eligibleURLsToAPIServiceInfos {
+			writer := responsewriter.NewInMemoryResponseWriter()
+			req, err := createNewAPIServiceRequest(r, eligibleURL)
+			if err != nil {
+				klog.Errorf("failed to create request: %s", err.Error())
+				continue
+			}
+			// delegate to multiple apiService handlers
+			apiServiceInfo.handler.ServeHTTP(writer, req)
+			lastModified, err := time.Parse(time.RFC1123, writer.Header().Get("Last-Modified"))
+			if err != nil {
+				klog.Warningf("did not receive a Last-Modified header in RFC1123 format: %s", err.Error())
+			} else if lastModified.After(maxLastModified) {
+				maxLastModified = lastModified
+			}
+
+			spec := spec3.OpenAPI{}
+			if err := json.Unmarshal(writer.Data(), &spec); err != nil {
+				klog.Errorf("failed to unmarshal OpenAPI for openapiService %v/%v: %s", apiServiceInfo.apiService.Namespace, apiServiceInfo.apiService.Name, err.Error())
+				continue
+			}
+			specs = append(specs, &spec)
+		}
+
+		// prefer info and version from external apiServices (this results in the OpenShift title and description)
+		sort.Slice(specs, func(i, j int) bool {
+			if info := specs[i].Info; info != nil && strings.HasPrefix(strings.ToLower(info.Title), "kubernetes") {
+				return false
+			}
+			return true
+		})
+		mergedSpec, err := mergeSpecsV3(specs...)
+		if err != nil {
+			klog.Errorf("failed to merge spec: %s", err.Error())
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+		mergedSpecJSON, _ := json.Marshal(mergedSpec)
+
+		if maxLastModified.IsZero() {
+			maxLastModified = time.Now()
+		}
+
+		openAPIHandleGroupVersion(w, r, mergedSpecJSON, mergedURL.Query().Get("hash"), maxLastModified)
+	}
+}
+
+// openAPIHandleGroupVersion is mostly copied from https://github.com/kubernetes/kube-openapi/blob/3c0fae5ee9fdc4e0cb7abff6fd66784a1f0dbcf8/pkg/handler3/handler.go#L222
+func openAPIHandleGroupVersion(w http.ResponseWriter, r *http.Request, data []byte, etag string, lastModified time.Time) {
+	const (
+		subTypeProtobufDeprecated = "com.github.proto-openapi.spec.v3@v1.0+protobuf"
+		subTypeProtobuf           = "com.github.proto-openapi.spec.v3.v1.0+protobuf"
+		subTypeJSON               = "json"
+	)
+
+	decipherableFormats := r.Header.Get("Accept")
+	if decipherableFormats == "" {
+		decipherableFormats = "*/*"
+	}
+	clauses := goautoneg.ParseAccept(decipherableFormats)
+	w.Header().Add("Vary", "Accept")
+
+	if len(clauses) == 0 {
+		return
+	}
+
+	accepted := []struct {
+		Type                string
+		SubType             string
+		ReturnedContentType string
+	}{
+		{"application", subTypeJSON, "application/" + subTypeJSON},
+		{"application", subTypeProtobuf, "application/" + subTypeProtobuf},
+		{"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf},
+	}
+
+	for _, clause := range clauses {
+		for _, accepts := range accepted {
+			if clause.Type != accepts.Type && clause.Type != "*" {
+				continue
+			}
+			if clause.SubType != accepts.SubType && clause.SubType != "*" {
+				continue
+			}
+
+			switch accepts.SubType {
+			case subTypeProtobuf, subTypeProtobufDeprecated:
+				var err error
+				data, err = handler3.ToV3ProtoBinary(data)
+				if err != nil {
+					klog.Errorf("failed to convert json to proto: %v", err)
+					w.WriteHeader(http.StatusInternalServerError)
+					return
+				}
+			}
+			// Set the Content-Type header in the response
+			w.Header().Set("Content-Type", accepts.ReturnedContentType)
+
+			// ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag
+			w.Header().Set("Etag", strconv.Quote(etag))
+
+			if hash := r.URL.Query().Get("hash"); hash != "" {
+				// validity of hash checked in handleGroupVersion with isHashCurrent
+
+				// The Vary header is required because the Accept header can
+				// change the contents returned. This prevents clients from caching
+				// protobuf as JSON and vice versa.
+				w.Header().Set("Vary", "Accept")
+
+				// Only set these headers when a hash is given.
+				w.Header().Set("Cache-Control", "public, immutable")
+				// Set the Expires directive to the maximum value of one year from the request,
+				// effectively indicating that the cache never expires.
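+				// (HTTP/1.1 advises servers not to send Expires dates more than one year in the future, hence AddDate(1, 0, 0) below.)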
+ w.Header().Set("Expires", time.Now().AddDate(1, 0, 0).Format(time.RFC1123)) + } + http.ServeContent(w, r, "", lastModified, bytes.NewReader(data)) + return + } + } + w.WriteHeader(406) + return +} + +func toUniqueRelativeURLs(relativeURLs []handler3.OpenAPIV3DiscoveryGroupVersion) sets.Set[string] { + uniqueURLs := sets.New[string]() + for _, url := range relativeURLs { + uniqueURLs.Insert(url.ServerRelativeURL) + } + return uniqueURLs +} + +func hasPrefix(urls []string, prefix string) bool { + if len(urls) == 0 { + return false + } + for _, url := range urls { + if !strings.HasPrefix(url, prefix) { + return false + } + } + return true +} + +func isHashCurrent(u *neturl.URL, currentETag string) bool { + if hash := u.Query().Get("hash"); len(hash) > 0 { + // check if hash is current only if requested + return hash == currentETag + } + return true +} + +// computeETag is copied from https://github.com/kubernetes/kubernetes/blob/2c6c4566eff972d6c1320b5f8ad795f88c822d09/staging/src/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/etag.go#L76 +func computeETag(data []byte) string { + if data == nil { + return "" + } + return fmt.Sprintf("%X", sha512.Sum512(data)) +} + +func mergeURLETags(delegationURLs []string) (*neturl.URL, error) { + // presume all urls are the same, so take the first one + newURL, err := neturl.Parse(delegationURLs[0]) + if err != nil { + return nil, err + } + if len(delegationURLs) == 1 { + return newURL, nil + } + // sorted, for consistent hash + delegationUniqueURLs := sets.List(sets.New(delegationURLs...)) + delegationUniqueURLsBytes, err := json.Marshal(delegationUniqueURLs) + if err != nil { + return nil, err + } + etag := computeETag(delegationUniqueURLsBytes) + + newQuery := newURL.Query() + newQuery.Set("hash", etag) + newURL.RawQuery = newQuery.Encode() + return newURL, nil +} + +func createNewAPIServiceRequest(from *http.Request, eligibleURL string) (*http.Request, error) { + req := from.Clone(request.WithUser(context.Background(), &user.DefaultInfo{Name: aggregatorUser})) + req.Header.Set("Accept", "application/json") + if hash := req.URL.Query().Get("hash"); len(hash) > 0 { + eligibleParsedURL, err := neturl.Parse(eligibleURL) + if err != nil { + return nil, err + } + // rewrite to include the latest hash for this apiservice + q := req.URL.Query() + q.Set("hash", eligibleParsedURL.Query().Get("hash")) + req.URL.RawQuery = q.Encode() + } + return req, nil +} diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/patch_merge.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/patch_merge.go new file mode 100644 index 0000000000000..4c9be46a28bcf --- /dev/null +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/patch_merge.go @@ -0,0 +1,67 @@ +package aggregator + +import ( + "fmt" + "strings" + + "k8s.io/kube-openapi/pkg/spec3" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// mergeSpecsV3 to prevent a dependency on apiextensions-apiserver, this function is copied from https://github.com/kubernetes/kubernetes/blob/2c6c4566eff972d6c1320b5f8ad795f88c822d09/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/builder/merge.go#L105 +// mergeSpecsV3 merges OpenAPI v3 specs for CRDs +// Conflicts belonging to the meta.v1 or autoscaling.v1 group versions are skipped as all CRDs reference those types +// Other conflicts will result in an error +func mergeSpecsV3(crdSpecs ...*spec3.OpenAPI) (*spec3.OpenAPI, error) { + crdSpec := &spec3.OpenAPI{} + if len(crdSpecs) > 0 { + 
+		crdSpec.Version = crdSpecs[0].Version
+		crdSpec.Info = crdSpecs[0].Info
+	}
+	for _, s := range crdSpecs {
+		err := mergeSpecV3(crdSpec, s)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return crdSpec, nil
+}
+
+const metadataGV = "io.k8s.apimachinery.pkg.apis.meta.v1"
+const autoscalingGV = "io.k8s.api.autoscaling.v1"
+
+// mergeSpecV3 is copied from https://github.com/kubernetes/kubernetes/blob/2c6c4566eff972d6c1320b5f8ad795f88c822d09/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/builder/merge.go#L123 to prevent a dependency on apiextensions-apiserver.
+// It copies paths and definitions from source to dest, mutating dest, but not source.
+// Conflicts belonging to the meta.v1 or autoscaling.v1 group versions are skipped, as all CRDs reference those types.
+// Other conflicts result in an error.
+func mergeSpecV3(dest, source *spec3.OpenAPI) error {
+	if source == nil || source.Paths == nil {
+		return nil
+	}
+	if dest.Paths == nil {
+		dest.Paths = &spec3.Paths{}
+	}
+
+	for k, v := range source.Components.Schemas {
+		if dest.Components == nil {
+			dest.Components = &spec3.Components{}
+		}
+		if dest.Components.Schemas == nil {
+			dest.Components.Schemas = map[string]*spec.Schema{}
+		}
+		if _, exists := dest.Components.Schemas[k]; exists {
+			if strings.HasPrefix(k, metadataGV) || strings.HasPrefix(k, autoscalingGV) {
+				continue
+			}
+			return fmt.Errorf("OpenAPI V3 merge schema conflict on %s", k)
+		}
+		dest.Components.Schemas[k] = v
+	}
+	for k, v := range source.Paths.Paths {
+		if dest.Paths.Paths == nil {
+			dest.Paths.Paths = map[string]*spec3.Path{}
+		}
+		// paths are copied without conflict checks; a duplicate path is overwritten
+		dest.Paths.Paths[k] = v
+	}
+	return nil
+}
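The controller change that follows gates availability reporting on a readiness signal: a channel that is closed once /readyz has succeeded for the first time, and polled without blocking on every sync. A minimal, self-contained sketch of that channel idiom (only the channel name mirrors the patch; the rest is illustrative):

	package main

	import "fmt"

	func main() {
		hasBeenReady := make(chan struct{})

		isReady := func() bool {
			// non-blocking poll: receiving from a closed channel always succeeds
			select {
			case <-hasBeenReady:
				return true
			default:
				return false
			}
		}

		fmt.Println(isReady()) // false: nothing has signaled readiness yet
		close(hasBeenReady)    // signaled exactly once, e.g. when readyz first succeeds
		fmt.Println(isReady()) // true: the gate stays open from now on
	}

Closing the channel acts as a one-shot broadcast: every subsequent receive succeeds immediately, so any number of sync loops can poll the gate cheaply and it can never flip back to not-ready.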
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/remote/remote_available_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/remote/remote_available_controller.go
index a94e254cd8f04..bd0e390a07c53 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/remote/remote_available_controller.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/remote/remote_available_controller.go
@@ -86,6 +86,9 @@ type AvailableConditionController struct {
 	// metrics registered into legacy registry
 	metrics *availabilitymetrics.Metrics
+
+	// hasBeenReady is signaled when the readyz endpoint succeeds for the first time.
+	hasBeenReady <-chan struct{}
 }
 
 // New returns a new remote APIService AvailableConditionController.
@@ -98,6 +101,7 @@ func New(
 	proxyCurrentCertKeyContent certKeyFunc,
 	serviceResolver ServiceResolver,
 	metrics *availabilitymetrics.Metrics,
+	hasBeenReady <-chan struct{},
 ) (*AvailableConditionController, error) {
 	c := &AvailableConditionController{
 		apiServiceClient: apiServiceClient,
@@ -115,6 +119,7 @@ func New(
 		proxyTransportDial:         proxyTransportDial,
 		proxyCurrentCertKeyContent: proxyCurrentCertKeyContent,
 		metrics:                    metrics,
+		hasBeenReady:               hasBeenReady,
 	}
 
 	// resync on this one because it is low cardinality and rechecking the actual discovery
@@ -164,6 +169,18 @@ func (c *AvailableConditionController) sync(key string) error {
 		return nil
 	}
 
+	// the availability checks depend on a fully initialized SDN, and OpenShift
+	// carries a few reachability checks that affect the /readyz protocol.
+	// Record whether the server has ever been ready so that we can skip posting
+	// failures, avoiding false positives until the server becomes ready.
+	hasBeenReady := false
+	select {
+	case <-c.hasBeenReady:
+		hasBeenReady = true
+	default:
+		// not ready yet; continue, we will only skip posting potential failures
+	}
+
 	apiService := originalAPIService.DeepCopy()
 
 	// if a particular transport was specified, use that otherwise build one
@@ -347,6 +364,11 @@ func (c *AvailableConditionController) sync(key string) error {
 	}
 
 	if lastError != nil {
+		if !hasBeenReady {
+			// returning an error will requeue the item with exponential backoff
+			return fmt.Errorf("the server hasn't been ready yet, skipping updating availability of the aggregated API until the server becomes ready to avoid false positives, lastError = %v", lastError)
+		}
+
 		availableCondition.Status = apiregistrationv1.ConditionFalse
 		availableCondition.Reason = "FailedDiscoveryCheck"
 		availableCondition.Message = lastError.Error()
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/remote/remote_available_controller_test.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/remote/remote_available_controller_test.go
index acfe9ba3952f7..7bfef14996e09 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/remote/remote_available_controller_test.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/remote/remote_available_controller_test.go
@@ -128,6 +128,9 @@ func setupAPIServices(t T, apiServices []runtime.Object) (*AvailableConditionCon
 		}
 	}
 
+	alwaysReadyChan := make(chan struct{})
+	close(alwaysReadyChan)
+
 	c := AvailableConditionController{
 		apiServiceClient: fakeClient.ApiregistrationV1(),
 		apiServiceLister: listers.NewAPIServiceLister(apiServiceIndexer),
@@ -141,7 +144,8 @@ func setupAPIServices(t T, apiServices []runtime.Object) (*AvailableConditionCon
 			workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 30*time.Second),
 			workqueue.TypedRateLimitingQueueConfig[string]{Name: "AvailableConditionController"},
 		),
-		metrics: availabilitymetrics.New(),
+		metrics:      availabilitymetrics.New(),
+		hasBeenReady: alwaysReadyChan,
 	}
 	for _, svc := range apiServices {
 		c.addAPIService(svc)
@@ -401,6 +405,8 @@ func TestSync(t *testing.T) {
 				w.WriteHeader(tc.backendStatus)
 			}))
 			defer testServer.Close()
+			alwaysReadyChan := make(chan struct{})
+			close(alwaysReadyChan)
 
 			c := AvailableConditionController{
 				apiServiceClient: fakeClient.ApiregistrationV1(),
@@ -410,6 +416,7 @@ func TestSync(t *testing.T) {
 				serviceResolver:            &fakeServiceResolver{url: testServer.URL},
 				proxyCurrentCertKeyContent: func() ([]byte, []byte) { return emptyCert(), emptyCert() },
 				metrics:                    availabilitymetrics.New(),
+				hasBeenReady:               alwaysReadyChan,
 			}
 			err
:= c.sync(tc.apiServiceName) if tc.expectedSyncError != "" { diff --git a/staging/src/k8s.io/kube-controller-manager/go.mod b/staging/src/k8s.io/kube-controller-manager/go.mod index dc62a84f39966..d8d9765717d23 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.mod +++ b/staging/src/k8s.io/kube-controller-manager/go.mod @@ -9,7 +9,7 @@ godebug default=go1.23 godebug winsymlink=0 require ( - k8s.io/apimachinery v0.0.0 + k8s.io/apimachinery v0.32.0 k8s.io/cloud-provider v0.0.0 k8s.io/controller-manager v0.0.0 ) @@ -29,7 +29,7 @@ require ( golang.org/x/text v0.19.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect - k8s.io/component-base v0.0.0 // indirect + k8s.io/component-base v0.32.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect @@ -38,6 +38,8 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver diff --git a/staging/src/k8s.io/kube-controller-manager/go.sum b/staging/src/k8s.io/kube-controller-manager/go.sum index 3c66da350b101..fac820d08047f 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.sum +++ b/staging/src/k8s.io/kube-controller-manager/go.sum @@ -15,6 +15,7 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= @@ -67,8 +68,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -105,6 +106,7 @@ go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUis go.opentelemetry.io/otel/sdk v1.28.0/go.mod 
h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -145,9 +147,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/staging/src/k8s.io/kube-proxy/go.mod b/staging/src/k8s.io/kube-proxy/go.mod index 21293460f9aae..7c986c42a42be 100644 --- a/staging/src/k8s.io/kube-proxy/go.mod +++ b/staging/src/k8s.io/kube-proxy/go.mod @@ -51,6 +51,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/kube-proxy/go.sum b/staging/src/k8s.io/kube-proxy/go.sum index 5e15223a67a35..212dbea9459f3 100644 --- a/staging/src/k8s.io/kube-proxy/go.sum +++ b/staging/src/k8s.io/kube-proxy/go.sum @@ -67,8 +67,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= @@ -148,9 +148,9 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/kube-scheduler/go.mod b/staging/src/k8s.io/kube-scheduler/go.mod index 31b3cf1a3a271..c789b6a3fd1aa 100644 --- a/staging/src/k8s.io/kube-scheduler/go.mod +++ b/staging/src/k8s.io/kube-scheduler/go.mod @@ -36,6 +36,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/kube-scheduler/go.sum b/staging/src/k8s.io/kube-scheduler/go.sum index 56805d17234f2..6ff912da01536 100644 --- a/staging/src/k8s.io/kube-scheduler/go.sum +++ b/staging/src/k8s.io/kube-scheduler/go.sum @@ -54,8 +54,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= @@ -124,9 +124,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/staging/src/k8s.io/kubectl/go.mod b/staging/src/k8s.io/kubectl/go.mod index 40dcb04b9d93c..a3c69f14d86aa 100644 --- a/staging/src/k8s.io/kubectl/go.mod +++ b/staging/src/k8s.io/kubectl/go.mod @@ -22,7 +22,7 @@ require ( github.com/lithammer/dedent v1.1.0 github.com/mitchellh/go-wordwrap v1.0.1 github.com/moby/term v0.5.0 - github.com/onsi/ginkgo/v2 v2.21.0 + github.com/onsi/ginkgo/v2 v2.20.2 github.com/onsi/gomega v1.35.1 github.com/pkg/errors v0.9.1 github.com/russross/blackfriday/v2 v2.1.0 @@ -97,6 +97,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/cli-runtime => ../cli-runtime diff --git a/staging/src/k8s.io/kubectl/go.sum b/staging/src/k8s.io/kubectl/go.sum index f7008ed6bba77..94d76ac6060f6 100644 --- a/staging/src/k8s.io/kubectl/go.sum +++ b/staging/src/k8s.io/kubectl/go.sum @@ -116,12 +116,12 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv 
v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -220,9 +220,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/kubelet/go.mod b/staging/src/k8s.io/kubelet/go.mod index 3e3bc92918e2b..012585055a7fd 100644 --- a/staging/src/k8s.io/kubelet/go.mod +++ b/staging/src/k8s.io/kubelet/go.mod @@ -12,12 +12,12 @@ require ( github.com/emicklei/go-restful/v3 v3.11.0 github.com/gogo/protobuf v1.3.2 github.com/stretchr/testify v1.9.0 - google.golang.org/grpc v1.65.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 + google.golang.org/grpc v1.67.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/client-go v0.32.0 + k8s.io/component-base v0.32.0 k8s.io/cri-api v0.0.0 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 @@ -57,7 +57,7 @@ require ( golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect @@ -67,6 +67,8 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver diff --git a/staging/src/k8s.io/kubelet/go.sum b/staging/src/k8s.io/kubelet/go.sum index 789ef1b188873..76650bae74937 100644 --- a/staging/src/k8s.io/kubelet/go.sum +++ b/staging/src/k8s.io/kubelet/go.sum @@ -1,5 +1,5 @@ cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod 
h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= @@ -16,7 +16,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -26,11 +26,12 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -52,7 +53,7 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= @@ 
-106,12 +107,13 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -165,6 +167,7 @@ go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8 go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= @@ -213,11 +216,11 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/kubelet/pkg/apis/well_known_openshift_labels.go b/staging/src/k8s.io/kubelet/pkg/apis/well_known_openshift_labels.go new file mode 100644 index 0000000000000..9535c1702c10b --- /dev/null +++ b/staging/src/k8s.io/kubelet/pkg/apis/well_known_openshift_labels.go @@ -0,0 +1,43 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apis + +import ( + "k8s.io/apimachinery/pkg/util/sets" +) + +const ( + NodeLabelControlPlane = "node-role.kubernetes.io/control-plane" + NodeLabelMaster = "node-role.kubernetes.io/master" + NodeLabelWorker = "node-role.kubernetes.io/worker" + NodeLabelEtcd = "node-role.kubernetes.io/etcd" +) + +var openshiftNodeLabels = sets.NewString( + NodeLabelControlPlane, + NodeLabelMaster, + NodeLabelWorker, + NodeLabelEtcd, +) + +func OpenShiftNodeLabels() []string { + return openshiftNodeLabels.List() +} + +func IsForbiddenOpenshiftLabel(label string) bool { + return openshiftNodeLabels.Has(label) +} diff --git a/staging/src/k8s.io/metrics/go.mod b/staging/src/k8s.io/metrics/go.mod index 192a37946b86f..7f369c3a17d24 100644 --- a/staging/src/k8s.io/metrics/go.mod +++ b/staging/src/k8s.io/metrics/go.mod @@ -63,6 +63,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/metrics/go.sum b/staging/src/k8s.io/metrics/go.sum index 15be7434f3223..bfeda1c02887c 100644 --- a/staging/src/k8s.io/metrics/go.sum +++ b/staging/src/k8s.io/metrics/go.sum @@ -66,10 +66,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod 
h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/staging/src/k8s.io/pod-security-admission/go.mod b/staging/src/k8s.io/pod-security-admission/go.mod index ebb5e81bb1c2e..ed6ef6bd4f151 100644 --- a/staging/src/k8s.io/pod-security-admission/go.mod +++ b/staging/src/k8s.io/pod-security-admission/go.mod @@ -14,11 +14,11 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/component-base v0.32.0 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/yaml v1.4.0 @@ -61,6 +61,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.1 // indirect @@ -81,6 +82,7 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect @@ -92,15 +94,15 @@ require ( golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect + google.golang.org/grpc v1.67.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kms v0.0.0 // indirect + k8s.io/kms v0.32.0 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect @@ -108,10 +110,16 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/client-go => github.com/dusk125/client-go 
v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api + k8s.io/apiextensions-apiserver => ../apiextensions-apiserver k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver k8s.io/client-go => ../client-go k8s.io/component-base => ../component-base k8s.io/kms => ../kms + k8s.io/kube-aggregator => ../kube-aggregator ) diff --git a/staging/src/k8s.io/pod-security-admission/go.sum b/staging/src/k8s.io/pod-security-admission/go.sum index 723f969914195..9ca373333c8a2 100644 --- a/staging/src/k8s.io/pod-security-admission/go.sum +++ b/staging/src/k8s.io/pod-security-admission/go.sum @@ -1,9 +1,11 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -21,7 +23,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -33,19 +35,31 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod 
h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -69,9 +83,15 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod 
h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= @@ -88,6 +108,7 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -99,6 +120,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -132,17 +155,25 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= +github.com/opencontainers/selinux v1.11.0/go.mod 
h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -151,6 +182,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -218,6 +250,7 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -274,12 +307,12 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod 
h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -307,6 +340,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/staging/src/k8s.io/pod-security-admission/metrics/metrics.go b/staging/src/k8s.io/pod-security-admission/metrics/metrics.go index e5e1947182dc9..948911e596042 100644 --- a/staging/src/k8s.io/pod-security-admission/metrics/metrics.go +++ b/staging/src/k8s.io/pod-security-admission/metrics/metrics.go @@ -98,15 +98,28 @@ func (r *PrometheusRecorder) RecordEvaluation(decision Decision, policy api.Leve } } - r.evaluationsCounter.CachedInc(evaluationsLabels{ - decision: string(decision), - level: string(policy.Level), - version: version, - mode: string(evalMode), - operation: operationLabel(attrs.GetOperation()), - resource: resourceLabel(attrs.GetResource()), - subresource: attrs.GetSubresource(), - }) + // prevent cardinality explosion by only recording the platform namespaces + namespace := attrs.GetNamespace() + if !(namespace == "openshift" || + strings.HasPrefix(namespace, "openshift-") || + strings.HasPrefix(namespace, "kube-") || + namespace == "default") { + // remove non-OpenShift platform 
namespace names
+		namespace = ""
+	}
+
+	el := evaluationsLabels{
+		decision:     string(decision),
+		level:        string(policy.Level),
+		version:      version,
+		mode:         string(evalMode),
+		operation:    operationLabel(attrs.GetOperation()),
+		resource:     resourceLabel(attrs.GetResource()),
+		subresource:  attrs.GetSubresource(),
+		ocpNamespace: namespace,
+	}
+
+	r.evaluationsCounter.CachedInc(el)
 }
 
 func (r *PrometheusRecorder) RecordExemption(attrs api.Attributes) {
@@ -156,17 +169,18 @@ func operationLabel(op admissionv1.Operation) string {
 }
 
 type evaluationsLabels struct {
-	decision    string
-	level       string
-	version     string
-	mode        string
-	operation   string
-	resource    string
-	subresource string
+	decision     string
+	level        string
+	version      string
+	mode         string
+	operation    string
+	resource     string
+	subresource  string
+	ocpNamespace string
 }
 
 func (l *evaluationsLabels) labels() []string {
-	return []string{l.decision, l.level, l.version, l.mode, l.operation, l.resource, l.subresource}
+	return []string{l.decision, l.level, l.version, l.mode, l.operation, l.resource, l.subresource, l.ocpNamespace}
 }
 
 type exemptionsLabels struct {
@@ -194,7 +208,7 @@ func newEvaluationsCounter() *evaluationsCounter {
 			Help:           "Number of policy evaluations that occurred, not counting ignored or exempt requests.",
 			StabilityLevel: metrics.ALPHA,
 		},
-		[]string{"decision", "policy_level", "policy_version", "mode", "request_operation", "resource", "subresource"},
+		[]string{"decision", "policy_level", "policy_version", "mode", "request_operation", "resource", "subresource", "ocp_namespace"},
 	),
 	cache: make(map[evaluationsLabels]metrics.CounterMetric),
 }
@@ -231,8 +245,8 @@ func (c *evaluationsCounter) Reset() {
 func (c *evaluationsCounter) populateCache() {
 	labelsToCache := []evaluationsLabels{
-		{decision: "allow", level: "privileged", version: "latest", mode: "enforce", operation: "create", resource: "pod", subresource: ""},
-		{decision: "allow", level: "privileged", version: "latest", mode: "enforce", operation: "update", resource: "pod", subresource: ""},
+		{decision: "allow", level: "privileged", version: "latest", mode: "enforce", operation: "create", resource: "pod", subresource: "", ocpNamespace: ""},
+		{decision: "allow", level: "privileged", version: "latest", mode: "enforce", operation: "update", resource: "pod", subresource: "", ocpNamespace: ""},
 	}
 	for _, l := range labelsToCache {
 		c.cache[l] = c.CounterVec.WithLabelValues(l.labels()...)
diff --git a/staging/src/k8s.io/pod-security-admission/metrics/metrics_test.go b/staging/src/k8s.io/pod-security-admission/metrics/metrics_test.go
index 657e1aa6d56e0..cd9e3e7fea04f 100644
--- a/staging/src/k8s.io/pod-security-admission/metrics/metrics_test.go
+++ b/staging/src/k8s.io/pod-security-admission/metrics/metrics_test.go
@@ -71,15 +71,17 @@ func TestRecordEvaluation(t *testing.T) {
 			recorder.RecordEvaluation(decision, levelVersion(level, version), mode, &api.AttributesRecord{
 				Resource:  resource,
 				Operation: op,
+				Namespace: "some-namespace",
 			})
 			if level == api.LevelPrivileged {
 				expectedVersion = "latest"
 			}
+
 			expected := fmt.Sprintf(`
 			# HELP pod_security_evaluations_total [ALPHA] Number of policy evaluations that occurred, not counting ignored or exempt requests.
# TYPE pod_security_evaluations_total counter - pod_security_evaluations_total{decision="%s",mode="%s",policy_level="%s",policy_version="%s",request_operation="%s",resource="%s",subresource=""} 1 + pod_security_evaluations_total{decision="%s",mode="%s",ocp_namespace="",policy_level="%s",policy_version="%s",request_operation="%s",resource="%s",subresource=""} 1 `, decision, mode, level, expectedVersion, strings.ToLower(string(op)), expectedResource) expected = expectCachedMetrics("pod_security_evaluations_total", expected) @@ -162,8 +164,8 @@ func levelVersion(level api.Level, version string) api.LevelVersion { // The cached metrics should always be present (value 0 if not counted). var expectedCachedMetrics = map[string][]string{ "pod_security_evaluations_total": { - `pod_security_evaluations_total{decision="allow",mode="enforce",policy_level="privileged",policy_version="latest",request_operation="create",resource="pod",subresource=""}`, - `pod_security_evaluations_total{decision="allow",mode="enforce",policy_level="privileged",policy_version="latest",request_operation="update",resource="pod",subresource=""}`, + `pod_security_evaluations_total{decision="allow",mode="enforce",ocp_namespace="",policy_level="privileged",policy_version="latest",request_operation="create",resource="pod",subresource=""}`, + `pod_security_evaluations_total{decision="allow",mode="enforce",ocp_namespace="",policy_level="privileged",policy_version="latest",request_operation="update",resource="pod",subresource=""}`, }, "pod_security_exemptions_total": { `pod_security_exemptions_total{request_operation="create",resource="controller",subresource=""}`, diff --git a/staging/src/k8s.io/sample-apiserver/go.mod b/staging/src/k8s.io/sample-apiserver/go.mod index df3797c3f894e..632c796d85b2b 100644 --- a/staging/src/k8s.io/sample-apiserver/go.mod +++ b/staging/src/k8s.io/sample-apiserver/go.mod @@ -12,11 +12,11 @@ require ( github.com/google/gofuzz v1.2.0 github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.9.0 - k8s.io/apimachinery v0.0.0 - k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 + k8s.io/client-go v0.32.0 k8s.io/code-generator v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/component-base v0.32.0 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/structured-merge-diff/v4 v4.4.2 @@ -59,6 +59,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.1 // indirect @@ -80,6 +81,7 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect @@ -93,29 +95,35 @@ require ( golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect + google.golang.org/grpc v1.67.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.0.0 // indirect + k8s.io/api v0.32.0 // indirect k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kms v0.0.0 // indirect + k8s.io/kms v0.32.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api + k8s.io/apiextensions-apiserver => ../apiextensions-apiserver k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver k8s.io/client-go => ../client-go k8s.io/code-generator => ../code-generator k8s.io/component-base => ../component-base k8s.io/kms => ../kms + k8s.io/kube-aggregator => ../kube-aggregator ) diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum index 32e5c2b5f30e8..b6930109beb34 100644 --- a/staging/src/k8s.io/sample-apiserver/go.sum +++ b/staging/src/k8s.io/sample-apiserver/go.sum @@ -1,9 +1,11 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -21,7 +23,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go 
v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -33,19 +35,31 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.3/go.mod 
h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -69,9 +83,15 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= @@ -88,6 +108,7 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -99,6 +120,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 
h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -132,17 +155,25 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -151,6 +182,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 
h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -219,6 +251,7 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -277,12 +310,12 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -311,6 +344,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= 
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/staging/src/k8s.io/sample-cli-plugin/go.mod b/staging/src/k8s.io/sample-cli-plugin/go.mod index 38b33ba9042e6..3ee6d060311df 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.mod +++ b/staging/src/k8s.io/sample-cli-plugin/go.mod @@ -73,6 +73,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/cli-runtime => ../cli-runtime diff --git a/staging/src/k8s.io/sample-cli-plugin/go.sum b/staging/src/k8s.io/sample-cli-plugin/go.sum index fdd3895bceb06..bb7f2aa389f5a 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.sum +++ b/staging/src/k8s.io/sample-cli-plugin/go.sum @@ -87,10 +87,10 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git a/staging/src/k8s.io/sample-controller/go.mod b/staging/src/k8s.io/sample-controller/go.mod index d0d27ffb89a19..68153773e12ee 100644 --- a/staging/src/k8s.io/sample-controller/go.mod +++ b/staging/src/k8s.io/sample-controller/go.mod @@ -61,6 +61,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/sample-controller/go.sum b/staging/src/k8s.io/sample-controller/go.sum index a50a01ece01dd..04406641bd110 100644 --- a/staging/src/k8s.io/sample-controller/go.sum +++ b/staging/src/k8s.io/sample-controller/go.sum @@ -66,10 +66,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod 
h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/test/conformance/image/go-runner/testdata/tartest/out.tar.gz b/test/conformance/image/go-runner/testdata/tartest/out.tar.gz new file mode 100644 index 0000000000000..7cb5eb8413a34 Binary files /dev/null and b/test/conformance/image/go-runner/testdata/tartest/out.tar.gz differ diff --git a/test/e2e/apimachinery/crd_publish_openapi.go b/test/e2e/apimachinery/crd_publish_openapi.go index ac816997810ba..a5487a88fe0fe 100644 --- a/test/e2e/apimachinery/crd_publish_openapi.go +++ b/test/e2e/apimachinery/crd_publish_openapi.go @@ -522,7 +522,13 @@ func setupCRDAndVerifySchema(f *framework.Framework, schema, expect []byte, grou return setupCRDAndVerifySchemaWithOptions(f, schema, expect, groupSuffix, versions) } -func setupCRDAndVerifySchemaWithOptions(f *framework.Framework, schema, expect []byte, groupSuffix string, versions []string, options ...crd.Option) (*crd.TestCrd, error) { +func setupCRDAndVerifySchemaWithOptions(f *framework.Framework, schema, expect []byte, groupSuffix string, versions []string, options ...crd.Option) (tCRD *crd.TestCrd, err error) { + defer func() { + if err == nil { + framework.Logf("sleeping 45 seconds before running the actual tests, we hope that all API servers converge during that window, see %q for more", "https://github.com/kubernetes/kubernetes/pull/90452") + time.Sleep(time.Second * 45) + } + }() group := fmt.Sprintf("%s-test-%s.example.com", f.BaseName, groupSuffix) if len(versions) == 0 { return nil, fmt.Errorf("require at least one version for CRD") } @@ -560,17 +566,17 @@ } crd.Spec.Versions = apiVersions }) - crd, err := crd.CreateMultiVersionTestCRD(f, group, options...) + tCRD, err = crd.CreateMultiVersionTestCRD(f, group, options...) if err != nil { return nil, fmt.Errorf("failed to create CRD: %w", err) } - for _, v := range crd.Crd.Spec.Versions { - if err := waitForDefinition(f.ClientSet, definitionName(crd, v.Name), expect); err != nil { + for _, v := range tCRD.Crd.Spec.Versions { + if err := waitForDefinition(f.ClientSet, definitionName(tCRD, v.Name), expect); err != nil { return nil, fmt.Errorf("%v", err) } } - return crd, nil + return tCRD, nil } func cleanupCRD(ctx context.Context, f *framework.Framework, crd *crd.TestCrd) error { diff --git a/test/e2e/apimachinery/watchlist.go b/test/e2e/apimachinery/watchlist.go index c99e79d848869..b319f68ed372d 100644 --- a/test/e2e/apimachinery/watchlist.go +++ b/test/e2e/apimachinery/watchlist.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "net/http" + "net/url" "sort" "strings" "time" @@ -53,12 +54,14 @@ var _ = SIGDescribe("API Streaming (aka. 
WatchList)", framework.WithSerial(), fu featuregatetesting.SetFeatureGateDuringTest(ginkgo.GinkgoTB(), utilfeature.DefaultFeatureGate, featuregate.Feature(clientfeatures.WatchListClient), true) stopCh := make(chan struct{}) defer close(stopCh) + secretInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return nil, fmt.Errorf("unexpected list call") }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options = withWellKnownListOptions(options) return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Watch(context.TODO(), options) }, }, @@ -101,7 +104,7 @@ var _ = SIGDescribe("API Streaming (aka. WatchList)", framework.WithSerial(), fu framework.ExpectNoError(err) ginkgo.By("Streaming secrets from the server") - secretList, err := wrappedKubeClient.CoreV1().Secrets(f.Namespace.Name).List(ctx, metav1.ListOptions{}) + secretList, err := wrappedKubeClient.CoreV1().Secrets(f.Namespace.Name).List(ctx, withWellKnownListOptions(metav1.ListOptions{})) framework.ExpectNoError(err) ginkgo.By("Verifying if the secret list was properly streamed") @@ -109,8 +112,8 @@ var _ = SIGDescribe("API Streaming (aka. WatchList)", framework.WithSerial(), fu gomega.Expect(cmp.Equal(expectedSecrets, streamedSecrets)).To(gomega.BeTrueBecause("data received via watchlist must match the added data")) ginkgo.By("Verifying if expected requests were sent to the server") - expectedRequestMadeByKubeClient := getExpectedRequestMadeByClientFor(secretList.ResourceVersion) - gomega.Expect(rt.actualRequests).To(gomega.Equal(expectedRequestMadeByKubeClient)) + expectedRequestsMadeByKubeClient := getExpectedRequestsMadeByClientFor(secretList.ResourceVersion) + gomega.Expect(rt.actualRequests).To(gomega.Equal(expectedRequestsMadeByKubeClient)) }) ginkgo.It("should be requested by dynamic client's List method when WatchListClient is enabled", func(ctx context.Context) { featuregatetesting.SetFeatureGateDuringTest(ginkgo.GinkgoTB(), utilfeature.DefaultFeatureGate, featuregate.Feature(clientfeatures.WatchListClient), true) @@ -123,7 +126,7 @@ var _ = SIGDescribe("API Streaming (aka. WatchList)", framework.WithSerial(), fu framework.ExpectNoError(err) ginkgo.By("Streaming secrets from the server") - secretList, err := wrappedDynamicClient.Resource(v1.SchemeGroupVersion.WithResource("secrets")).Namespace(f.Namespace.Name).List(ctx, metav1.ListOptions{}) + secretList, err := wrappedDynamicClient.Resource(v1.SchemeGroupVersion.WithResource("secrets")).Namespace(f.Namespace.Name).List(ctx, withWellKnownListOptions(metav1.ListOptions{})) framework.ExpectNoError(err) ginkgo.By("Verifying if the secret list was properly streamed") @@ -132,8 +135,8 @@ var _ = SIGDescribe("API Streaming (aka. 
WatchList)", framework.WithSerial(), fu gomega.Expect(secretList.GetObjectKind().GroupVersionKind()).To(gomega.Equal(v1.SchemeGroupVersion.WithKind("SecretList"))) ginkgo.By("Verifying if expected requests were sent to the server") - expectedRequestMadeByDynamicClient := getExpectedRequestMadeByClientFor(secretList.GetResourceVersion()) - gomega.Expect(rt.actualRequests).To(gomega.Equal(expectedRequestMadeByDynamicClient)) + expectedRequestsMadeByDynamicClient := getExpectedRequestsMadeByClientFor(secretList.GetResourceVersion()) + gomega.Expect(rt.actualRequests).To(gomega.Equal(expectedRequestsMadeByDynamicClient)) }) ginkgo.It("should be requested by metadata client's List method when WatchListClient is enabled", func(ctx context.Context) { featuregatetesting.SetFeatureGateDuringTest(ginkgo.GinkgoTB(), utilfeature.DefaultFeatureGate, featuregate.Feature(clientfeatures.WatchListClient), true) @@ -152,7 +155,7 @@ var _ = SIGDescribe("API Streaming (aka. WatchList)", framework.WithSerial(), fu framework.ExpectNoError(err) ginkgo.By("Streaming secrets metadata from the server") - secretMetaList, err := wrappedMetaClient.Resource(v1.SchemeGroupVersion.WithResource("secrets")).Namespace(f.Namespace.Name).List(ctx, metav1.ListOptions{}) + secretMetaList, err := wrappedMetaClient.Resource(v1.SchemeGroupVersion.WithResource("secrets")).Namespace(f.Namespace.Name).List(ctx, withWellKnownListOptions(metav1.ListOptions{})) framework.ExpectNoError(err) ginkgo.By("Verifying if the secret meta list was properly streamed") @@ -160,8 +163,8 @@ var _ = SIGDescribe("API Streaming (aka. WatchList)", framework.WithSerial(), fu gomega.Expect(cmp.Equal(expectedMetaSecrets, streamedMetaSecrets)).To(gomega.BeTrueBecause("data received via watchlist must match the added data")) ginkgo.By("Verifying if expected requests were sent to the server") - expectedRequestMadeByMetaClient := getExpectedRequestMadeByClientFor(secretMetaList.GetResourceVersion()) - gomega.Expect(rt.actualRequests).To(gomega.Equal(expectedRequestMadeByMetaClient)) + expectedRequestsMadeByMetaClient := getExpectedRequestsMadeByClientFor(secretMetaList.GetResourceVersion()) + gomega.Expect(rt.actualRequests).To(gomega.Equal(expectedRequestsMadeByMetaClient)) }) // Validates unsupported Accept headers in WatchList. @@ -186,14 +189,14 @@ var _ = SIGDescribe("API Streaming (aka. 
WatchList)", framework.WithSerial(), fu // note that the client in case of an error (406) will fall back // to a standard list request thus the overall call passes ginkgo.By("Streaming secrets as Table from the server") - secretTable, err := wrappedDynamicClient.Resource(v1.SchemeGroupVersion.WithResource("secrets")).Namespace(f.Namespace.Name).List(ctx, metav1.ListOptions{}) + secretTable, err := wrappedDynamicClient.Resource(v1.SchemeGroupVersion.WithResource("secrets")).Namespace(f.Namespace.Name).List(ctx, withWellKnownListOptions(metav1.ListOptions{})) framework.ExpectNoError(err) gomega.Expect(secretTable.GetObjectKind().GroupVersionKind()).To(gomega.Equal(metav1.SchemeGroupVersion.WithKind("Table"))) ginkgo.By("Verifying if expected response was sent by the server") gomega.Expect(rt.actualResponseStatuses[0]).To(gomega.Equal("406 Not Acceptable")) - expectedRequestMadeByDynamicClient := getExpectedRequestMadeByClientWhenFallbackToListFor(secretTable.GetResourceVersion()) - gomega.Expect(rt.actualRequests).To(gomega.Equal(expectedRequestMadeByDynamicClient)) + expectedRequestsMadeByDynamicClient := getExpectedRequestsMadeByClientWhenFallbackToListFor(secretTable.GetResourceVersion()) + gomega.Expect(rt.actualRequests).To(gomega.Equal(expectedRequestsMadeByDynamicClient)) }) @@ -217,7 +220,7 @@ var _ = SIGDescribe("API Streaming (aka. WatchList)", framework.WithSerial(), fu wrappedDynamicClient := dynamic.New(restClient) ginkgo.By("Streaming secrets from the server") - secretList, err := wrappedDynamicClient.Resource(v1.SchemeGroupVersion.WithResource("secrets")).Namespace(f.Namespace.Name).List(ctx, metav1.ListOptions{}) + secretList, err := wrappedDynamicClient.Resource(v1.SchemeGroupVersion.WithResource("secrets")).Namespace(f.Namespace.Name).List(ctx, withWellKnownListOptions(metav1.ListOptions{})) framework.ExpectNoError(err) ginkgo.By("Verifying if the secret list was properly streamed") @@ -225,8 +228,8 @@ var _ = SIGDescribe("API Streaming (aka. 
WatchList)", framework.WithSerial(), fu gomega.Expect(cmp.Equal(expectedSecrets, streamedSecrets)).To(gomega.BeTrueBecause("data received via watchlist must match the added data")) ginkgo.By("Verifying if expected requests were sent to the server") - expectedRequestMadeByDynamicClient := getExpectedRequestMadeByClientFor(secretList.GetResourceVersion()) - gomega.Expect(rt.actualRequests).To(gomega.Equal(expectedRequestMadeByDynamicClient)) + expectedRequestsMadeByDynamicClient := getExpectedRequestsMadeByClientFor(secretList.GetResourceVersion()) + gomega.Expect(rt.actualRequests).To(gomega.Equal(expectedRequestsMadeByDynamicClient)) }) }) @@ -274,28 +277,48 @@ func verifyStore(ctx context.Context, expectedSecrets []v1.Secret, store cache.S } // corresponds to a streaming request made by the client to stream the secrets -const expectedStreamingRequestMadeByClient string = "allowWatchBookmarks=true&resourceVersionMatch=NotOlderThan&sendInitialEvents=true&watch=true" +func getExpectedStreamingRequestMadeByClient() string { + params := url.Values{} + params.Add("allowWatchBookmarks", "true") + params.Add("labelSelector", "watchlist=true") + params.Add("resourceVersionMatch", "NotOlderThan") + params.Add("sendInitialEvents", "true") + params.Add("watch", "true") + return params.Encode() +} + +func getExpectedListRequestMadeByConsistencyDetectorFor(rv string) string { + params := url.Values{} + params.Add("labelSelector", "watchlist=true") + params.Add("resourceVersion", rv) + params.Add("resourceVersionMatch", "Exact") + return params.Encode() +} -func getExpectedRequestMadeByClientFor(rv string) []string { +func getExpectedRequestsMadeByClientFor(rv string) []string { expectedRequestMadeByClient := []string{ - expectedStreamingRequestMadeByClient, + getExpectedStreamingRequestMadeByClient(), } if consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() { // corresponds to a standard list request made by the consistency detector build in into the client - expectedRequestMadeByClient = append(expectedRequestMadeByClient, fmt.Sprintf("resourceVersion=%s&resourceVersionMatch=Exact", rv)) + expectedRequestMadeByClient = append(expectedRequestMadeByClient, getExpectedListRequestMadeByConsistencyDetectorFor(rv)) } return expectedRequestMadeByClient } -func getExpectedRequestMadeByClientWhenFallbackToListFor(rv string) []string { +func getExpectedRequestsMadeByClientWhenFallbackToListFor(rv string) []string { expectedRequestMadeByClient := []string{ - expectedStreamingRequestMadeByClient, + getExpectedStreamingRequestMadeByClient(), // corresponds to a list request made by the client - "", + func() string { + params := url.Values{} + params.Add("labelSelector", "watchlist=true") + return params.Encode() + }(), } if consistencydetector.IsDataConsistencyDetectionForListEnabled() { // corresponds to a standard list request made by the consistency detector build in into the client - expectedRequestMadeByClient = append(expectedRequestMadeByClient, fmt.Sprintf("resourceVersion=%s&resourceVersionMatch=Exact", rv)) + expectedRequestMadeByClient = append(expectedRequestMadeByClient, getExpectedListRequestMadeByConsistencyDetectorFor(rv)) } return expectedRequestMadeByClient } @@ -325,6 +348,11 @@ func addWellKnownUnstructuredSecrets(ctx context.Context, f *framework.Framework return secrets } +func withWellKnownListOptions(options metav1.ListOptions) metav1.ListOptions { + options.LabelSelector = "watchlist=true" + return options +} + type byName []v1.Secret func (a byName) Len() int { return 
len(a) } @@ -333,6 +361,9 @@ func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func newSecret(name string) *v1.Secret { return &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{Name: name}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{"watchlist": "true"}, + }, } } diff --git a/test/e2e/auth/selfsubjectreviews.go b/test/e2e/auth/selfsubjectreviews.go index c617edd98064d..e309c3d78e440 100644 --- a/test/e2e/auth/selfsubjectreviews.go +++ b/test/e2e/auth/selfsubjectreviews.go @@ -42,10 +42,8 @@ var _ = SIGDescribe("SelfSubjectReview", func() { Testname: SelfSubjectReview API Description: The authentication.k8s.io API group MUST exist in the /apis discovery document. - The authentication.k8s.io/v1alpha1 API group/version MUST exist in the /apis/authentication.k8s.io discovery document. The authentication.k8s.io/v1beta1 API group/version MUST exist in the /apis/authentication.k8s.io discovery document. The authentication.k8s.io/v1 API group/version MUST exist in the /apis/authentication.k8s.io discovery document. - The selfsubjectreviews resource MUST exist in the /apis/authentication.k8s.io/v1alpha1 discovery document. The selfsubjectreviews resource MUST exist in the /apis/authentication.k8s.io/v1beta1 discovery document. The selfsubjectreviews resource MUST exist in the /apis/authentication.k8s.io/v1 discovery document. The selfsubjectreviews resource MUST support create. @@ -107,7 +105,8 @@ var _ = SIGDescribe("SelfSubjectReview", func() { } } }, - ginkgo.Entry("authentication/v1alpha1", "v1alpha1", authenticationv1alpha1.SchemeGroupVersion.String()), + // OpenShift: Skip v1alpha check + // ginkgo.Entry("authentication/v1alpha1", "v1alpha1", authenticationv1alpha1.SchemeGroupVersion.String()), ginkgo.Entry("authentication/v1beta1", "v1beta1", authenticationv1beta1.SchemeGroupVersion.String()), ginkgo.Entry("authentication/v1", "v1", authenticationv1.SchemeGroupVersion.String()), ) @@ -116,6 +115,9 @@ var _ = SIGDescribe("SelfSubjectReview", func() { // Check creating ginkgo.By("creating SSR authentication/v1alpha1") { + // OpenShift: Skip v1alpha check + ginkgo.Skip("No authentication/v1alpha1 available") + // Use impersonate to make user attributes predictable config := restConfig(f) diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index cdc32265d99fc..a9e576aaabe00 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -64,6 +64,17 @@ var _ = SIGDescribe("ServiceAccounts", func() { time.Sleep(10 * time.Second) sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(ctx, "default", metav1.GetOptions{}) framework.ExpectNoError(err) + + // TODO: Ignore the image pull secret that OpenShift sometimes creates and adds to the list of Secrets. + // TODO: This patch can be removed once OpenShift stops adding the pull secret to the list of secrets in 4.16. 
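+ // Copy the list, then rebuild it without any OpenShift-injected "default-dockercfg-*"
+ // pull secret, so the upstream assertion below still sees an empty Secrets list.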
+ secrets := sa.DeepCopy().Secrets + sa.Secrets = nil + for _, s := range secrets { + if strings.HasPrefix(s.Name, "default-dockercfg") { + continue + } + sa.Secrets = append(sa.Secrets, s) + } gomega.Expect(sa.Secrets).To(gomega.BeEmpty()) } }) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 32cc23528510c..d83228f0999e2 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -36,7 +36,6 @@ import ( "k8s.io/klog/v2" conformancetestdata "k8s.io/kubernetes/test/conformance/testdata" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" "k8s.io/kubernetes/test/e2e/framework/testfiles" e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests" testfixtures "k8s.io/kubernetes/test/fixtures" @@ -73,21 +72,13 @@ import ( _ "k8s.io/kubernetes/test/utils/format" ) -// handleFlags sets up all flags and parses the command line. -func handleFlags() { - config.CopyFlags(config.Flags, flag.CommandLine) - framework.RegisterCommonFlags(flag.CommandLine) - framework.RegisterClusterFlags(flag.CommandLine) - flag.Parse() -} - func TestMain(m *testing.M) { var versionFlag bool flag.CommandLine.BoolVar(&versionFlag, "version", false, "Displays version information.") listConformanceTests := flag.CommandLine.Bool("list-conformance-tests", false, "If true, will show list of conformance tests.") // Register test flags, then parse flags. - handleFlags() + HandleFlags() if versionFlag { fmt.Printf("%s\n", version.Get()) diff --git a/test/e2e/framework/ginkgowrapper.go b/test/e2e/framework/ginkgowrapper.go index 8517e4b5182fb..1e38ce2a35ae9 100644 --- a/test/e2e/framework/ginkgowrapper.go +++ b/test/e2e/framework/ginkgowrapper.go @@ -326,7 +326,9 @@ func validateText(location types.CodeLocation, text string, labels []string) { } func recordTextBug(location types.CodeLocation, message string) { - RecordBug(Bug{FileName: location.FileName, LineNumber: location.LineNumber, Message: message}) + // TODO(soltysh): we need to figure out how we want to handle labels + // https://issues.redhat.com/browse/OCPBUGS-25641 + // RecordBug(Bug{FileName: location.FileName, LineNumber: location.LineNumber, Message: message}) } // WithEnvironment specifies that a certain test or group of tests only works diff --git a/test/e2e/framework/internal/unittests/bugs/bugs_test.go b/test/e2e/framework/internal/unittests/bugs/bugs_test.go index d651c8fa9b72c..bfa59f02cfc0e 100644 --- a/test/e2e/framework/internal/unittests/bugs/bugs_test.go +++ b/test/e2e/framework/internal/unittests/bugs/bugs_test.go @@ -27,6 +27,10 @@ import ( ) func TestBugs(t *testing.T) { + // TODO(soltysh): we need to figure out how we want to handle labels + // https://issues.redhat.com/browse/OCPBUGS-25641 + t.Skip("temporarily disabled") + require.NoError(t, framework.FormatBugs()) RecordBugs() Describe() diff --git a/test/e2e/framework/internal/unittests/cleanup/cleanup_test.go b/test/e2e/framework/internal/unittests/cleanup/cleanup_test.go index a0a2059b6bb04..4cac4ed3a0006 100644 --- a/test/e2e/framework/internal/unittests/cleanup/cleanup_test.go +++ b/test/e2e/framework/internal/unittests/cleanup/cleanup_test.go @@ -120,7 +120,6 @@ const ( < Exit [BeforeEach] e2e - cleanup_test.go:63